bitkeeper revision 1.1007 (40d9a9b67QWaIdCVPMQU8ujlBGA1nQ)
author: kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Wed, 23 Jun 2004 16:03:02 +0000 (16:03 +0000)
committer: kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Wed, 23 Jun 2004 16:03:02 +0000 (16:03 +0000)
Remove GFP_* flags from memory allocators.
Something similar may get added back to the buddy allocator if it
gets used for allocating domain memory on some architectures.

30 files changed:
xen/arch/x86/acpi.c
xen/arch/x86/apic.c
xen/arch/x86/io_apic.c
xen/arch/x86/irq.c
xen/arch/x86/mm.c
xen/arch/x86/mpparse.c
xen/arch/x86/pci-pc.c
xen/arch/x86/pdb-stub.c
xen/arch/x86/smpboot.c
xen/common/ac_timer.c
xen/common/dom0_ops.c
xen/common/domain.c
xen/common/event_channel.c
xen/common/page_alloc.c
xen/common/physdev.c
xen/common/resource.c
xen/common/sched_atropos.c
xen/common/sched_bvt.c
xen/common/schedule.c
xen/common/shadow.c
xen/common/slab.c
xen/common/trace.c
xen/drivers/char/console.c
xen/drivers/pci/pci.c
xen/drivers/pci/setup-res.c
xen/include/asm-x86/page.h
xen/include/asm-x86/x86_64/page.h
xen/include/xen/mm.h
xen/include/xen/shadow.h
xen/include/xen/slab.h

index ed16dc0148bca95593d28a44c504dd3e01776165..16e79d00c43080aeb485351f34ffa0a2d2358007 100644 (file)
@@ -578,7 +578,7 @@ static void acpi_create_identity_pmd (void)
        pgd_t *pgd;
        int i;
 
-       ptep = (pte_t*)__get_free_page(GFP_KERNEL);
+       ptep = (pte_t*)__get_free_page();
 
        /* fill page with low mapping */
        for (i = 0; i < PTRS_PER_PTE; i++)
index 6831c3021ad5cf5c10a68ede857a2fed8a8bfd17..cd1733c17375de6239e76515837d79502988c15a 100644 (file)
@@ -445,7 +445,7 @@ void __init init_apic_mappings(void)
      * simulate the local APIC and another one for the IO-APIC.
      */
     if (!smp_found_config && detect_init_APIC()) {
-        apic_phys = get_free_page(GFP_KERNEL);
+        apic_phys = get_free_page();
         apic_phys = __pa(apic_phys);
     } else
         apic_phys = mp_lapic_addr;
index 6e80ed571c37623c7fce1078b468eb3e057243cf..137f4e74b0d2a741429f2354b4c387dce5433483 100644 (file)
@@ -688,7 +688,7 @@ static struct hw_interrupt_type ioapic_level_irq_type = {
 void __init setup_IO_APIC_irqs(void)
 {
        struct IO_APIC_route_entry entry;
-       int apic, pin, idx, irq, first_notcon = 1, vector;
+       int apic, pin, idx, irq, vector;
        unsigned long flags;
 
        printk(KERN_DEBUG "init IO_APIC IRQs\n");
@@ -707,14 +707,8 @@ void __init setup_IO_APIC_irqs(void)
                entry.dest.logical.logical_dest = target_cpus();
 
                idx = find_irq_entry(apic,pin,mp_INT);
-               if (idx == -1) {
-                       if (first_notcon) {
-                               printk(KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mpc_apicid, pin);
-                               first_notcon = 0;
-                       } else
-                               printk(", %d-%d", mp_ioapics[apic].mpc_apicid, pin);
+               if (idx == -1)
                        continue;
-               }
 
                entry.trigger = irq_trigger(idx);
                entry.polarity = irq_polarity(idx);
@@ -758,9 +752,6 @@ void __init setup_IO_APIC_irqs(void)
                spin_unlock_irqrestore(&ioapic_lock, flags);
        }
        }
-
-       if (!first_notcon)
-               printk(" not connected.\n");
 }
 
 /*
index 1a5eb0b8d21f0de4eac3c0686b6ddf2735e28cdc..768246908fd4a2d4daf0b9659fc22afd7bb65d12 100644 (file)
@@ -254,7 +254,7 @@ int pirq_guest_bind(struct domain *p, int irq, int will_share)
             goto out;
         }
 
-        action = kmalloc(sizeof(irq_guest_action_t), GFP_KERNEL);
+        action = kmalloc(sizeof(irq_guest_action_t));
         if ( (desc->action = (struct irqaction *)action) == NULL )
         {
             DPRINTK("Cannot bind IRQ %d to guest. Out of memory.\n", irq);
index f9f739265e813b732a90e7d0037767bb2b08ddc1..8556b57d45d466da41a7d8611c2b3f8290c15611 100644 (file)
@@ -70,7 +70,7 @@ static void __init fixrange_init(unsigned long start,
     {
         if ( !l2_pgentry_empty(*l2e) )
             continue;
-        page = (unsigned long)get_free_page(GFP_KERNEL);
+        page = (unsigned long)get_free_page();
         clear_page(page);
         *l2e = mk_l2_pgentry(__pa(page) | __PAGE_HYPERVISOR);
         vaddr += 1 << L2_PAGETABLE_SHIFT;
@@ -97,7 +97,7 @@ void __init paging_init(void)
     fixrange_init(addr, 0, idle_pg_table);
 
     /* Create page table for ioremap(). */
-    ioremap_pt = (void *)get_free_page(GFP_KERNEL);
+    ioremap_pt = (void *)get_free_page();
     clear_page(ioremap_pt);
     idle_pg_table[IOREMAP_VIRT_START >> L2_PAGETABLE_SHIFT] = 
         mk_l2_pgentry(__pa(ioremap_pt) | __PAGE_HYPERVISOR);
@@ -109,7 +109,7 @@ void __init paging_init(void)
                    (RO_MPT_VIRT_START >> L2_PAGETABLE_SHIFT));
 
     /* Set up mapping cache for domain pages. */
-    mapcache = (unsigned long *)get_free_page(GFP_KERNEL);
+    mapcache = (unsigned long *)get_free_page();
     clear_page(mapcache);
     idle_pg_table[MAPCACHE_VIRT_START >> L2_PAGETABLE_SHIFT] =
         mk_l2_pgentry(__pa(mapcache) | __PAGE_HYPERVISOR);
index 3a77390ba94413332cf6a0a2781e29e170afbca1..ae1e1da93bf50cc0fe743a95045d28bbc867234f 100644 (file)
@@ -509,7 +509,7 @@ static int __init smp_read_mpc(struct mp_config_table *mpc)
        
        count = (max_mp_busses * sizeof(int)) * 4;
        count += (max_irq_sources * sizeof(struct mpc_config_intsrc));
-       bus_data = (void *)__get_free_pages(GFP_KERNEL, get_order(count));
+       bus_data = (void *)__get_free_pages(get_order(count));
        if (!bus_data) {
                printk(KERN_ERR "SMP mptable: out of memory!\n");
                return 0;
@@ -694,7 +694,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
                struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
        } *bus_data;
 
-       bus_data = (void *)__get_free_pages(GFP_KERNEL, get_order(sizeof(*bus_data)));
+       bus_data = (void *)__get_free_pages(get_order(sizeof(*bus_data)));
        if (!bus_data)
                panic("SMP mptable: out of memory!\n");
        mp_bus_id_to_type = bus_data->mp_bus_id_to_type;
@@ -1171,7 +1171,7 @@ void __init mp_config_acpi_legacy_irqs (void)
 
        count = (MAX_MP_BUSSES * sizeof(int)) * 4;
        count += (MAX_IRQ_SOURCES * sizeof(int)) * 4;
-       bus_data = (void *)__get_free_pages(GFP_KERNEL, get_order(count));
+       bus_data = (void *)__get_free_pages(get_order(count));
        if (!bus_data) {
                panic("Fatal: can't allocate bus memory for ACPI legacy IRQ!");
        }
index eac8d5bb4f93694574692f0d7018f3c103dbe698..488fab91727e03f242428333b5b8846a8b99e65d 100644 (file)
@@ -1003,7 +1003,7 @@ struct irq_routing_table * __devinit pcibios_get_irq_routing_table(void)
 
        if (!pci_bios_present)
                return NULL;
-       page = __get_free_page(GFP_KERNEL);
+       page = __get_free_page();
        if (!page)
                return NULL;
        opt.table = (struct irq_info *) page;
@@ -1030,7 +1030,7 @@ struct irq_routing_table * __devinit pcibios_get_irq_routing_table(void)
        if (ret & 0xff00)
                printk(KERN_ERR "PCI: Error %02x when fetching IRQ routing table.\n", (ret >> 8) & 0xff);
        else if (opt.size) {
-               rt = kmalloc(sizeof(struct irq_routing_table) + opt.size, GFP_KERNEL);
+               rt = kmalloc(sizeof(struct irq_routing_table) + opt.size);
                if (rt) {
                        memset(rt, 0, sizeof(struct irq_routing_table));
                        rt->size = opt.size + sizeof(struct irq_routing_table);
index c24a86ccdc5b992b79bce0fc84441d0fceb59b8f..532db8eb14a4bf1a768f8d8a5dd1b6de89aa782d 100644 (file)
@@ -836,7 +836,7 @@ struct pdb_breakpoint breakpoints;
 
 void pdb_bkpt_add (unsigned long cr3, unsigned long address)
 {
-    struct pdb_breakpoint *bkpt = kmalloc(sizeof(*bkpt), GFP_KERNEL);
+    struct pdb_breakpoint *bkpt = kmalloc(sizeof(*bkpt));
     bkpt->cr3 = cr3;
     bkpt->address = address;
     list_add(&bkpt->list, &breakpoints.list);
index ff8bcb7faab345cd8279495ed94de6e7cb15bc63..6a643ada2440556d627beb52cea49d103060dfa0 100644 (file)
@@ -406,7 +406,7 @@ void __init start_secondary(void)
      * At this point, boot CPU has fully initialised the IDT. It is
      * now safe to make ourselves a private copy.
      */
-    idt_tables[cpu] = kmalloc(IDT_ENTRIES*8, GFP_KERNEL);
+    idt_tables[cpu] = kmalloc(IDT_ENTRIES*8);
     memcpy(idt_tables[cpu], idt_table, IDT_ENTRIES*8);
     *(unsigned short *)(&idt_load[0]) = (IDT_ENTRIES*8)-1;
     *(unsigned long  *)(&idt_load[2]) = (unsigned long)idt_tables[cpu];
@@ -669,7 +669,7 @@ static void __init do_boot_cpu (int apicid)
     /* So we see what's up. */
     printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
 
-    stack = __pa(__get_free_pages(GFP_KERNEL, 1));
+    stack = __pa(__get_free_pages(1));
     stack_start.esp = stack + STACK_SIZE - STACK_RESERVED;
 
     /* Debug build: detect stack overflow by setting up a guard page. */
index d58ca7194bdc00a2271e37659c8b8c2e1e0b373e..f28a783400a8ed3209b7f76ec92220ae4541ad50 100644 (file)
@@ -130,10 +130,9 @@ static int add_entry(struct ac_timer **heap, struct ac_timer *t)
     if ( unlikely(sz == GET_HEAP_LIMIT(heap)) )
     {
         int i, limit = (GET_HEAP_LIMIT(heap)+1) << 1;
-        struct ac_timer **new_heap = kmalloc(
-            limit * sizeof(struct ac_timer *), GFP_KERNEL);
+        struct ac_timer **new_heap = kmalloc(limit*sizeof(struct ac_timer *));
         if ( new_heap == NULL ) BUG();
-        memcpy(new_heap, heap, (limit>>1) * sizeof(struct ac_timer *));
+        memcpy(new_heap, heap, (limit>>1)*sizeof(struct ac_timer *));
         for ( i = 0; i < smp_num_cpus; i++ )
             if ( ac_timers[i].heap == heap )
                 ac_timers[i].heap = new_heap;
@@ -280,7 +279,7 @@ void __init ac_timer_init(void)
     for ( i = 0; i < smp_num_cpus; i++ )
     {
         ac_timers[i].heap = kmalloc(
-            (DEFAULT_HEAP_LIMIT+1) * sizeof(struct ac_timer *), GFP_KERNEL);
+            (DEFAULT_HEAP_LIMIT+1) * sizeof(struct ac_timer *));
         if ( ac_timers[i].heap == NULL ) BUG();
         SET_HEAP_SIZE(ac_timers[i].heap, 0);
         SET_HEAP_LIMIT(ac_timers[i].heap, DEFAULT_HEAP_LIMIT);
index f999bd45368378869f927bd96f32d1795a9b6372..83cc82f6edd010116d7cfe88c5fe648e17e02cf5 100644 (file)
@@ -318,7 +318,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
 
         if ( op->u.getdomaininfo.ctxt != NULL )
         {
-            if ( (c = kmalloc(sizeof(*c), GFP_KERNEL)) == NULL )
+            if ( (c = kmalloc(sizeof(*c))) == NULL )
             {
                 ret = -ENOMEM;
                 put_domain(d);
index a50bc24badfda822ace8d4a8f86e4ccef95a3c30..02ae4c42c46aaa00b9bd659bc6948746079d91a4 100644 (file)
@@ -84,13 +84,13 @@ struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
         INIT_LIST_HEAD(&p->page_list);
         p->max_pages = p->tot_pages = 0;
 
-        p->shared_info = (void *)get_free_page(GFP_KERNEL);
+        p->shared_info = (void *)get_free_page();
         memset(p->shared_info, 0, PAGE_SIZE);
         SHARE_PFN_WITH_DOMAIN(virt_to_page(p->shared_info), p);
         machine_to_phys_mapping[virt_to_phys(p->shared_info) >> 
                                PAGE_SHIFT] = 0x80000000UL;  /* debug */
 
-        p->mm.perdomain_pt = (l1_pgentry_t *)get_free_page(GFP_KERNEL);
+        p->mm.perdomain_pt = (l1_pgentry_t *)get_free_page();
         memset(p->mm.perdomain_pt, 0, PAGE_SIZE);
         machine_to_phys_mapping[virt_to_phys(p->mm.perdomain_pt) >> 
                                PAGE_SHIFT] = 0x0fffdeadUL;  /* debug */
@@ -474,7 +474,7 @@ int final_setup_guestos(struct domain *p, dom0_builddomain_t *builddomain)
     int i, rc = 0;
     full_execution_context_t *c;
 
-    if ( (c = kmalloc(sizeof(*c), GFP_KERNEL)) == NULL )
+    if ( (c = kmalloc(sizeof(*c))) == NULL )
         return -ENOMEM;
 
     if ( test_bit(DF_CONSTRUCTED, &p->flags) )
index 813519c923363526e21e1cb5772ed0e34bf1071c..840496d18207acd6979c7092825a85c91ab0a8f6 100644 (file)
@@ -48,7 +48,7 @@ static int get_free_port(struct domain *d)
         
         max *= 2;
         
-        chn = kmalloc(max * sizeof(event_channel_t), GFP_KERNEL);
+        chn = kmalloc(max * sizeof(event_channel_t));
         if ( unlikely(chn == NULL) )
             return -ENOMEM;
 
@@ -483,8 +483,7 @@ long do_event_channel_op(evtchn_op_t *uop)
 int init_event_channels(struct domain *d)
 {
     spin_lock_init(&d->event_channel_lock);
-    d->event_channel = kmalloc(INIT_EVENT_CHANNELS * sizeof(event_channel_t), 
-                               GFP_KERNEL);
+    d->event_channel = kmalloc(INIT_EVENT_CHANNELS * sizeof(event_channel_t));
     if ( unlikely(d->event_channel == NULL) )
         return -ENOMEM;
     d->max_event_channel = INIT_EVENT_CHANNELS;
index 48a35e0017a2957cc1b711937f96eceddbde1457..113d2f5fba364c38ea7e0fad40509891c053b7c4 100644 (file)
@@ -263,7 +263,7 @@ void __init init_page_allocator(unsigned long min, unsigned long max)
 
 
 /* Allocate 2^@order contiguous pages. Returns a VIRTUAL address. */
-unsigned long __get_free_pages(int mask, int order)
+unsigned long __get_free_pages(int order)
 {
     int i, attempts = 0;
     chunk_head_t *alloc_ch, *spare_ch;
@@ -321,7 +321,7 @@ retry:
         
     if ( attempts++ < 8 )
     {
-        kmem_cache_reap(0);
+        kmem_cache_reap();
         goto retry;
     }
 
index a74a3d4e6bcb4babe02cc30234dd22e90e5cd7b2..2cbfd9ec354475c2afb7e8d7d04a3d374b8860fe 100644 (file)
@@ -98,7 +98,7 @@ static void add_dev_to_task(struct domain *p,
         return;
     }
 
-    if ( !(pdev = kmalloc(sizeof(phys_dev_t), GFP_KERNEL)) )
+    if ( (pdev = kmalloc(sizeof(phys_dev_t))) == NULL )
     {
         INFO("Error allocating pdev structure.\n");
         return;
@@ -171,8 +171,7 @@ int physdev_pci_access_modify(
 
     if ( p->io_bitmap == NULL )
     {
-        p->io_bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
-        if ( p->io_bitmap == NULL )
+        if ( (p->io_bitmap = kmalloc(IO_BITMAP_BYTES)) == NULL )
         {
             rc = -ENOMEM;
             goto out;
@@ -738,7 +737,7 @@ void physdev_init_dom0(struct domain *p)
         /* Skip bridges and other peculiarities for now. */
         if ( dev->hdr_type != PCI_HEADER_TYPE_NORMAL )
             continue;
-        pdev = kmalloc(sizeof(phys_dev_t), GFP_KERNEL);
+        pdev = kmalloc(sizeof(phys_dev_t));
         pdev->dev = dev;
         pdev->flags = ACC_WRITE;
         pdev->state = 0;
index 089050f55c07b410512635faf87b8c2cfa4fe94a..26855806a51cbbf2d583541c02ddc6af69158727 100644 (file)
@@ -220,7 +220,7 @@ int allocate_resource(struct resource *root, struct resource *new,
  */
 struct resource * __request_region(struct resource *parent, unsigned long start, unsigned long n, const char *name)
 {
-       struct resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
+       struct resource *res = kmalloc(sizeof(*res));
 
        if (res) {
                memset(res, 0, sizeof(*res));
index b2d758adc68c1de0450e22042799cd73a790d1bb..35a91e5b3382b333e1c2ebd2a8c66b731e57634e 100644 (file)
@@ -528,8 +528,7 @@ static int at_init_scheduler()
     
     for ( i = 0; i < NR_CPUS; i++ )
     {
-        schedule_data[i].sched_priv = kmalloc(sizeof(struct at_cpu_info),
-                                              GFP_KERNEL);
+        schedule_data[i].sched_priv = kmalloc(sizeof(struct at_cpu_info));
         if ( schedule_data[i].sched_priv == NULL )
             return -1;
         WAITQ(i)->next = WAITQ(i);
@@ -592,7 +591,7 @@ static int at_alloc_task(struct domain *p)
 {
     ASSERT(p != NULL);
 
-    p->sched_priv = kmem_cache_alloc(dom_info_cache, GFP_KERNEL);
+    p->sched_priv = kmem_cache_alloc(dom_info_cache);
     if( p->sched_priv == NULL )
         return -1;
 
index 26295f3693237fb6b56f3b7b9e6b843c3c03e8e5..afc0a6e5a6ceaca997683f001a32f05fde804e61 100644 (file)
@@ -96,7 +96,7 @@ static void __calc_evt(struct bvt_dom_info *inf)
  */
 int bvt_alloc_task(struct domain *p)
 {
-    if ( (BVT_INFO(p) = kmem_cache_alloc(dom_info_cache,GFP_KERNEL)) == NULL )
+    if ( (BVT_INFO(p) = kmem_cache_alloc(dom_info_cache)) == NULL )
         return -1;
     
     return 0;
@@ -410,8 +410,7 @@ int bvt_init_scheduler()
 
     for ( i = 0; i < NR_CPUS; i++ )
     {
-        CPU_INFO(i) = kmalloc(sizeof(struct bvt_cpu_info), GFP_KERNEL);
-
+        CPU_INFO(i) = kmalloc(sizeof(struct bvt_cpu_info));
         if ( CPU_INFO(i) == NULL )
         {
             printk("Failed to allocate BVT scheduler per-CPU memory!\n");
index a43e725db60e830b9e59f7a44da0437bdf6f0753..b54c0efc458fcd930a3e637cbb3b8da73bef2141 100644 (file)
@@ -110,7 +110,7 @@ struct domain *alloc_domain_struct(void)
 {
     struct domain *d;
 
-    if ( (d = kmem_cache_alloc(domain_struct_cachep,GFP_KERNEL)) == NULL )
+    if ( (d = kmem_cache_alloc(domain_struct_cachep)) == NULL )
         return NULL;
     
     memset(d, 0, sizeof(*d));
index 7b20e57f1d4f1110e624418176f7281e69cc534c..f4deaec4fa6ac7a3e1f094ca248a39604b3bb66a 100644 (file)
@@ -242,22 +242,18 @@ int shadow_mode_enable( struct domain *p, unsigned int mode )
     m->shadow_mode = mode;
  
     // allocate hashtable
-    m->shadow_ht = kmalloc( shadow_ht_buckets * 
-                            sizeof(struct shadow_status), GFP_KERNEL );
-    if( ! m->shadow_ht )
+    m->shadow_ht = kmalloc(shadow_ht_buckets * 
+                           sizeof(struct shadow_status));
+    if( m->shadow_ht == NULL )
         goto nomem;
 
-    memset( m->shadow_ht, 0, shadow_ht_buckets * 
-            sizeof(struct shadow_status) );
-
+    memset(m->shadow_ht, 0, shadow_ht_buckets * sizeof(struct shadow_status));
 
     // allocate space for first lot of extra nodes
-    m->shadow_ht_extras = kmalloc( sizeof(void*) + 
-                                                                  (shadow_ht_extra_size * 
-                                                                       sizeof(struct shadow_status)),
-                                                                  GFP_KERNEL );
-
-    if( ! m->shadow_ht_extras )
+    m->shadow_ht_extras = kmalloc(sizeof(void*) + 
+                                  (shadow_ht_extra_size * 
+                                   sizeof(struct shadow_status)));
+    if( m->shadow_ht_extras == NULL )
         goto nomem;
 
     memset( m->shadow_ht_extras, 0, sizeof(void*) + (shadow_ht_extra_size * 
@@ -280,9 +276,8 @@ int shadow_mode_enable( struct domain *p, unsigned int mode )
     {
         m->shadow_dirty_bitmap_size = (p->max_pages+63)&(~63);
         m->shadow_dirty_bitmap = 
-            kmalloc( m->shadow_dirty_bitmap_size/8, GFP_KERNEL );
-
-        if( !m->shadow_dirty_bitmap  )
+            kmalloc( m->shadow_dirty_bitmap_size/8);
+        if( m->shadow_dirty_bitmap == NULL )
         {
             m->shadow_dirty_bitmap_size = 0;
             goto nomem;
index 28a6f8f86e58df79e0417d1197cc232c96253051..2266baa9aff9d673ca2bf9c6fb8591cd4f94ad0e 100644 (file)
  * page long) and always contiguous), and each slab contains multiple
  * initialized objects.
  *
- * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
- * normal). If you need a special memory type, then must create a new
- * cache for that memory type.
- *
  * In order to reduce fragmentation, the slabs are sorted in 3 groups:
  *   full slabs with 0 free objects
  *   partial slabs
  *     are accessed without any locking.
  *  The per-cpu arrays are never accessed from the wrong cpu, no locking.
  *  The non-constant members are protected with a per-cache irq spinlock.
- *
- * Further notes from the original documentation:
- *
- * 11 April '97.  Started multi-threading - markhe
- *     The global cache-chain is protected by the semaphore 'cache_chain_sem'.
- *     The sem is only needed when accessing/extending the cache-chain, which
- *     can never happen inside an interrupt (kmem_cache_create(),
- *     kmem_cache_shrink() and kmem_cache_reap()).
- *
- *     To prevent kmem_cache_shrink() trying to shrink a 'growing' cache (which
- *     maybe be sleeping and therefore not holding the semaphore/lock), the
- *     growing field is used.  This also prevents reaping from a cache.
- *
- *     At present, each engine can be growing a cache.  This should be blocked.
- *
- */
-
-/*
- * DEBUG       - 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
- *               SLAB_RED_ZONE & SLAB_POISON.
- *               0 for faster, smaller code (especially in the critical paths).
- *
- * STATS       - 1 to collect stats for /proc/slabinfo.
- *               0 for faster, smaller code (especially in the critical paths).
- *
- * FORCED_DEBUG        - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
  */
 
 #include <xen/config.h>
 #include <xen/smp.h>
 #include <xen/sched.h>
 
-
+/*
+ * DEBUG  - 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
+ *         SLAB_RED_ZONE & SLAB_POISON.
+ *         0 for faster, smaller code (especially in the critical paths).
+ *
+ * STATS  - 1 to collect stats for /proc/slabinfo.
+ *         0 for faster, smaller code (especially in the critical paths).
+ *
+ * FORCED_DEBUG        - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
+ */
 #ifdef CONFIG_DEBUG_SLAB
 #define        DEBUG           1
 #define        STATS           1
 
 /* Legal flag mask for kmem_cache_create(). */
 #if DEBUG
-# define CREATE_MASK   (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
+#define CREATE_MASK    (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
                         SLAB_POISON | SLAB_HWCACHE_ALIGN | \
-                        SLAB_NO_REAP | SLAB_CACHE_DMA)
+                        SLAB_NO_REAP)
 #else
-# define CREATE_MASK   (SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | SLAB_CACHE_DMA)
+#define CREATE_MASK    (SLAB_HWCACHE_ALIGN | SLAB_NO_REAP)
 #endif
 
 /*
@@ -155,11 +134,11 @@ static unsigned long offslab_limit;
  * Slabs are chained into three list: fully used, partial, fully free slabs.
  */
 typedef struct slab_s {
-       struct list_head        list;
-       unsigned long           colouroff;
-       void                    *s_mem;         /* including colour offset */
-       unsigned int            inuse;          /* num of objs active in slab */
-       kmem_bufctl_t           free;
+    struct list_head list;
+    unsigned long    colouroff;
+    void            *s_mem;    /* including colour offset */
+    unsigned int     inuse;    /* num of objs active in slab */
+    kmem_bufctl_t    free;
 } slab_t;
 
 #define slab_bufctl(slabp) \
@@ -173,8 +152,8 @@ typedef struct slab_s {
  * footprint.
  */
 typedef struct cpucache_s {
-       unsigned int avail;
-       unsigned int limit;
+    unsigned int avail;
+    unsigned int limit;
 } cpucache_t;
 
 #define cc_entry(cpucache) \
@@ -191,59 +170,55 @@ typedef struct cpucache_s {
 
 struct kmem_cache_s {
 /* 1) each alloc & free */
-       /* full, partial first, then free */
-       struct list_head        slabs_full;
-       struct list_head        slabs_partial;
-       struct list_head        slabs_free;
-       unsigned int            objsize;
-       unsigned int            flags;  /* constant flags */
-       unsigned int            num;    /* # of objs per slab */
-       spinlock_t              spinlock;
+    /* full, partial first, then free */
+    struct list_head   slabs_full;
+    struct list_head   slabs_partial;
+    struct list_head   slabs_free;
+    unsigned int               objsize;
+    unsigned int               flags;  /* constant flags */
+    unsigned int               num;    /* # of objs per slab */
+    spinlock_t         spinlock;
 #ifdef CONFIG_SMP
-       unsigned int            batchcount;
+    unsigned int               batchcount;
 #endif
 
 /* 2) slab additions /removals */
-       /* order of pgs per slab (2^n) */
-       unsigned int            gfporder;
-
-       /* force GFP flags, e.g. GFP_DMA */
-       unsigned int            gfpflags;
+    /* order of pgs per slab (2^n) */
+    unsigned int               gfporder;
+    size_t                     colour;         /* cache colouring range */
+    unsigned int               colour_off;     /* colour offset */
+    unsigned int               colour_next;    /* cache colouring */
+    kmem_cache_t               *slabp_cache;
+    unsigned int               growing;
+    unsigned int               dflags;         /* dynamic flags */
 
-       size_t                  colour;         /* cache colouring range */
-       unsigned int            colour_off;     /* colour offset */
-       unsigned int            colour_next;    /* cache colouring */
-       kmem_cache_t            *slabp_cache;
-       unsigned int            growing;
-       unsigned int            dflags;         /* dynamic flags */
+    /* constructor func */
+    void (*ctor)(void *, kmem_cache_t *, unsigned long);
 
-       /* constructor func */
-       void (*ctor)(void *, kmem_cache_t *, unsigned long);
+    /* de-constructor func */
+    void (*dtor)(void *, kmem_cache_t *, unsigned long);
 
-       /* de-constructor func */
-       void (*dtor)(void *, kmem_cache_t *, unsigned long);
-
-       unsigned long           failures;
+    unsigned long              failures;
 
 /* 3) cache creation/removal */
-       char                    name[CACHE_NAMELEN];
-       struct list_head        next;
+    char                       name[CACHE_NAMELEN];
+    struct list_head   next;
 #ifdef CONFIG_SMP
 /* 4) per-cpu data */
-       cpucache_t              *cpudata[NR_CPUS];
+    cpucache_t         *cpudata[NR_CPUS];
 #endif
 #if STATS
-       unsigned long           num_active;
-       unsigned long           num_allocations;
-       unsigned long           high_mark;
-       unsigned long           grown;
-       unsigned long           reaped;
-       unsigned long           errors;
+    unsigned long              num_active;
+    unsigned long              num_allocations;
+    unsigned long              high_mark;
+    unsigned long              grown;
+    unsigned long              reaped;
+    unsigned long              errors;
 #ifdef CONFIG_SMP
-       atomic_t                allochit;
-       atomic_t                allocmiss;
-       atomic_t                freehit;
-       atomic_t                freemiss;
+    atomic_t           allochit;
+    atomic_t           allocmiss;
+    atomic_t           freehit;
+    atomic_t           freemiss;
 #endif
 #endif
 };
@@ -331,40 +306,34 @@ static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
 
 /* Size description struct for general caches. */
 typedef struct cache_sizes {
-       size_t           cs_size;
-       kmem_cache_t    *cs_cachep;
-       kmem_cache_t    *cs_dmacachep;
+    size_t              cs_size;
+    kmem_cache_t       *cs_cachep;
 } cache_sizes_t;
 
 static cache_sizes_t cache_sizes[] = {
-#if PAGE_SIZE == 4096
-       {    32,        NULL, NULL},
-#endif
-       {    64,        NULL, NULL},
-       {   128,        NULL, NULL},
-       {   256,        NULL, NULL},
-       {   512,        NULL, NULL},
-       {  1024,        NULL, NULL},
-       {  2048,        NULL, NULL},
-       {  4096,        NULL, NULL},
-       {  8192,        NULL, NULL},
-       { 16384,        NULL, NULL},
-       { 32768,        NULL, NULL},
-       { 65536,        NULL, NULL},
-       {131072,        NULL, NULL},
-       {     0,        NULL, NULL}
+    {    32,   NULL},
+    {    64,   NULL},
+    {   128,   NULL},
+    {   256,   NULL},
+    {   512,   NULL},
+    {  1024,   NULL},
+    {  2048,   NULL},
+    {  4096,   NULL},
+    {  8192,   NULL},
+    { 16384,   NULL},
+    {     0,   NULL}
 };
 
 /* internal cache of cache description objs */
 static kmem_cache_t cache_cache = {
-       slabs_full:     LIST_HEAD_INIT(cache_cache.slabs_full),
-       slabs_partial:  LIST_HEAD_INIT(cache_cache.slabs_partial),
-       slabs_free:     LIST_HEAD_INIT(cache_cache.slabs_free),
-       objsize:        sizeof(kmem_cache_t),
-       flags:          SLAB_NO_REAP,
-       spinlock:       SPIN_LOCK_UNLOCKED,
-       colour_off:     L1_CACHE_BYTES,
-       name:           "kmem_cache",
+    slabs_full:    LIST_HEAD_INIT(cache_cache.slabs_full),
+    slabs_partial: LIST_HEAD_INIT(cache_cache.slabs_partial),
+    slabs_free:    LIST_HEAD_INIT(cache_cache.slabs_free),
+    objsize:       sizeof(kmem_cache_t),
+    flags:         SLAB_NO_REAP,
+    spinlock:      SPIN_LOCK_UNLOCKED,
+    colour_off:    L1_CACHE_BYTES,
+    name:          "kmem_cache"
 };
 
 /* Guard access to the cache-chain. */
@@ -392,47 +361,47 @@ static void enable_all_cpucaches (void);
 
 /* Cal the num objs, wastage, and bytes left over for a given slab size. */
 static void kmem_cache_estimate (unsigned long gfporder, size_t size,
-                int flags, size_t *left_over, unsigned int *num)
+                                 int flags, size_t *left_over, unsigned int *num)
 {
-       int i;
-       size_t wastage = PAGE_SIZE<<gfporder;
-       size_t extra = 0;
-       size_t base = 0;
-
-       if (!(flags & CFLGS_OFF_SLAB)) {
-               base = sizeof(slab_t);
-               extra = sizeof(kmem_bufctl_t);
-       }
-       i = 0;
-       while (i*size + L1_CACHE_ALIGN(base+i*extra) <= wastage)
-               i++;
-       if (i > 0)
-               i--;
-
-       if (i > SLAB_LIMIT)
-               i = SLAB_LIMIT;
-
-       *num = i;
-       wastage -= i*size;
-       wastage -= L1_CACHE_ALIGN(base+i*extra);
-       *left_over = wastage;
+    int i;
+    size_t wastage = PAGE_SIZE<<gfporder;
+    size_t extra = 0;
+    size_t base = 0;
+
+    if (!(flags & CFLGS_OFF_SLAB)) {
+        base = sizeof(slab_t);
+        extra = sizeof(kmem_bufctl_t);
+    }
+    i = 0;
+    while (i*size + L1_CACHE_ALIGN(base+i*extra) <= wastage)
+        i++;
+    if (i > 0)
+        i--;
+
+    if (i > SLAB_LIMIT)
+        i = SLAB_LIMIT;
+
+    *num = i;
+    wastage -= i*size;
+    wastage -= L1_CACHE_ALIGN(base+i*extra);
+    *left_over = wastage;
 }
 
 /* Initialisation - setup the `cache' cache. */
 void __init kmem_cache_init(void)
 {
-       size_t left_over;
+    size_t left_over;
 
-       init_MUTEX(&cache_chain_sem);
-       INIT_LIST_HEAD(&cache_chain);
+    init_MUTEX(&cache_chain_sem);
+    INIT_LIST_HEAD(&cache_chain);
 
-       kmem_cache_estimate(0, cache_cache.objsize, 0,
+    kmem_cache_estimate(0, cache_cache.objsize, 0,
                        &left_over, &cache_cache.num);
-       if (!cache_cache.num)
-               BUG();
+    if (!cache_cache.num)
+        BUG();
 
-       cache_cache.colour = left_over/cache_cache.colour_off;
-       cache_cache.colour_next = 0;
+    cache_cache.colour = left_over/cache_cache.colour_off;
+    cache_cache.colour_next = 0;
 }
 
 
@@ -441,117 +410,106 @@ void __init kmem_cache_init(void)
  */
 void __init kmem_cache_sizes_init(unsigned long num_physpages)
 {
-       cache_sizes_t *sizes = cache_sizes;
-       char name[20];
-       /*
-        * Fragmentation resistance on low memory - only use bigger
-        * page orders on machines with more than 32MB of memory.
-        */
-       if (num_physpages > (32 << 20) >> PAGE_SHIFT)
-               slab_break_gfp_order = BREAK_GFP_ORDER_HI;
-       do {
-               /* For performance, all the general caches are L1 aligned.
-                * This should be particularly beneficial on SMP boxes, as it
-                * eliminates "false sharing".
-                * Note for systems short on memory removing the alignment will
-                * allow tighter packing of the smaller caches. */
-               sprintf(name,"size-%Zd",sizes->cs_size);
-               if (!(sizes->cs_cachep =
-                       kmem_cache_create(name, sizes->cs_size,
-                                       0, SLAB_HWCACHE_ALIGN, NULL, NULL))) {
-                       BUG();
-               }
-
-               /* Inc off-slab bufctl limit until the ceiling is hit. */
-               if (!(OFF_SLAB(sizes->cs_cachep))) {
-                       offslab_limit = sizes->cs_size-sizeof(slab_t);
-                       offslab_limit /= 2;
-               }
-               sprintf(name, "size-%Zd(DMA)",sizes->cs_size);
-               sizes->cs_dmacachep = kmem_cache_create(name, sizes->cs_size, 0,
-                             SLAB_CACHE_DMA|SLAB_HWCACHE_ALIGN, NULL, NULL);
-               if (!sizes->cs_dmacachep)
-                       BUG();
-               sizes++;
-       } while (sizes->cs_size);
+    cache_sizes_t *sizes = cache_sizes;
+    char name[20];
+    /*
+     * Fragmentation resistance on low memory - only use bigger
+     * page orders on machines with more than 32MB of memory.
+     */
+    if (num_physpages > (32 << 20) >> PAGE_SHIFT)
+        slab_break_gfp_order = BREAK_GFP_ORDER_HI;
+    do {
+        /* For performance, all the general caches are L1 aligned.
+         * This should be particularly beneficial on SMP boxes, as it
+         * eliminates "false sharing".
+         * Note for systems short on memory removing the alignment will
+         * allow tighter packing of the smaller caches. */
+        sprintf(name,"size-%Zd",sizes->cs_size);
+        if (!(sizes->cs_cachep =
+              kmem_cache_create(name, sizes->cs_size,
+                                0, SLAB_HWCACHE_ALIGN, NULL, NULL))) {
+            BUG();
+        }
+
+        /* Inc off-slab bufctl limit until the ceiling is hit. */
+        if (!(OFF_SLAB(sizes->cs_cachep))) {
+            offslab_limit = sizes->cs_size-sizeof(slab_t);
+            offslab_limit /= 2;
+        }
+        sizes++;
+    } while (sizes->cs_size);
 }
 
 int __init kmem_cpucache_init(void)
 {
 #ifdef CONFIG_SMP
-       g_cpucache_up = 1;
-       enable_all_cpucaches();
+    g_cpucache_up = 1;
+    enable_all_cpucaches();
 #endif
-       return 0;
+    return 0;
 }
 
 /*__initcall(kmem_cpucache_init);*/
 
 /* Interface to system's page allocator. No need to hold the cache-lock.
  */
-static inline void * kmem_getpages (kmem_cache_t *cachep, unsigned long flags)
+static inline void *kmem_getpages(kmem_cache_t *cachep)
 {
-       void    *addr;
-
-       /*
-        * If we requested dmaable memory, we will get it. Even if we
-        * did not request dmaable memory, we might get it, but that
-        * would be relatively rare and ignorable.
-        */
-       flags |= cachep->gfpflags;
-       addr = (void*) __get_free_pages(flags, cachep->gfporder);
-       /* Assume that now we have the pages no one else can legally
-        * messes with the 'struct page's.
-        * However vm_scan() might try to test the structure to see if
-        * it is a named-page or buffer-page.  The members it tests are
-        * of no interest here.....
-        */
-       return addr;
+    void *addr;
+
+    addr = (void*) __get_free_pages(cachep->gfporder);
+    /* Assume that now we have the pages no one else can legally
+     * mess with the 'struct page's.
+     * However vm_scan() might try to test the structure to see if
+     * it is a named-page or buffer-page.  The members it tests are
+     * of no interest here.....
+     */
+    return addr;
 }
 
 /* Interface to system's page release. */
 static inline void kmem_freepages (kmem_cache_t *cachep, void *addr)
 {
-       unsigned long i = (1<<cachep->gfporder);
-       struct pfn_info *page = virt_to_page(addr);
-
-       /* free_pages() does not clear the type bit - we do that.
-        * The pages have been unlinked from their cache-slab,
-        * but their 'struct page's might be accessed in
-        * vm_scan(). Shouldn't be a worry.
-        */
-       while (i--) {
-               PageClearSlab(page);
-               page++;
-       }
-
-       free_pages((unsigned long)addr, cachep->gfporder);
+    unsigned long i = (1<<cachep->gfporder);
+    struct pfn_info *page = virt_to_page(addr);
+
+    /* free_pages() does not clear the type bit - we do that.
+     * The pages have been unlinked from their cache-slab,
+     * but their 'struct page's might be accessed in
+     * vm_scan(). Shouldn't be a worry.
+     */
+    while (i--) {
+        PageClearSlab(page);
+        page++;
+    }
+
+    free_pages((unsigned long)addr, cachep->gfporder);
 }
 
 #if DEBUG
 static inline void kmem_poison_obj (kmem_cache_t *cachep, void *addr)
 {
-       int size = cachep->objsize;
-       if (cachep->flags & SLAB_RED_ZONE) {
-               addr += BYTES_PER_WORD;
-               size -= 2*BYTES_PER_WORD;
-       }
-       memset(addr, POISON_BYTE, size);
-       *(unsigned char *)(addr+size-1) = POISON_END;
+    int size = cachep->objsize;
+    if (cachep->flags & SLAB_RED_ZONE) {
+        addr += BYTES_PER_WORD;
+        size -= 2*BYTES_PER_WORD;
+    }
+    memset(addr, POISON_BYTE, size);
+    *(unsigned char *)(addr+size-1) = POISON_END;
 }
 
 static inline int kmem_check_poison_obj (kmem_cache_t *cachep, void *addr)
 {
-       int size = cachep->objsize;
-       void *end;
-       if (cachep->flags & SLAB_RED_ZONE) {
-               addr += BYTES_PER_WORD;
-               size -= 2*BYTES_PER_WORD;
-       }
-       end = memchr(addr, POISON_END, size);
-       if (end != (addr+size-1))
-               return 1;
-       return 0;
+    int size = cachep->objsize;
+    void *end;
+    if (cachep->flags & SLAB_RED_ZONE) {
+        addr += BYTES_PER_WORD;
+        size -= 2*BYTES_PER_WORD;
+    }
+    end = memchr(addr, POISON_END, size);
+    if (end != (addr+size-1))
+        return 1;
+    return 0;
 }
 #endif
 
@@ -561,40 +519,40 @@ static inline int kmem_check_poison_obj (kmem_cache_t *cachep, void *addr)
  */
 static void kmem_slab_destroy (kmem_cache_t *cachep, slab_t *slabp)
 {
-       if (cachep->dtor
+    if (cachep->dtor
 #if DEBUG
-               || cachep->flags & (SLAB_POISON | SLAB_RED_ZONE)
+        || cachep->flags & (SLAB_POISON | SLAB_RED_ZONE)
 #endif
        ) {
-               int i;
-               for (i = 0; i < cachep->num; i++) {
-                       void* objp = slabp->s_mem+cachep->objsize*i;
+        int i;
+        for (i = 0; i < cachep->num; i++) {
+            void* objp = slabp->s_mem+cachep->objsize*i;
 #if DEBUG
-                       if (cachep->flags & SLAB_RED_ZONE) {
-                               if (*((unsigned long*)(objp)) != RED_MAGIC1)
-                                       BUG();
-                               if (*((unsigned long*)(objp + cachep->objsize
-                                               -BYTES_PER_WORD)) != RED_MAGIC1)
-                                       BUG();
-                               objp += BYTES_PER_WORD;
-                       }
+            if (cachep->flags & SLAB_RED_ZONE) {
+                if (*((unsigned long*)(objp)) != RED_MAGIC1)
+                    BUG();
+                if (*((unsigned long*)(objp + cachep->objsize
+                                       -BYTES_PER_WORD)) != RED_MAGIC1)
+                    BUG();
+                objp += BYTES_PER_WORD;
+            }
 #endif
-                       if (cachep->dtor)
-                               (cachep->dtor)(objp, cachep, 0);
+            if (cachep->dtor)
+                (cachep->dtor)(objp, cachep, 0);
 #if DEBUG
-                       if (cachep->flags & SLAB_RED_ZONE) {
-                               objp -= BYTES_PER_WORD;
-                       }       
-                       if ((cachep->flags & SLAB_POISON)  &&
-                               kmem_check_poison_obj(cachep, objp))
-                               BUG();
+            if (cachep->flags & SLAB_RED_ZONE) {
+                objp -= BYTES_PER_WORD;
+            }
+            if ((cachep->flags & SLAB_POISON)  &&
+                kmem_check_poison_obj(cachep, objp))
+                BUG();
 #endif
-               }
-       }
+        }
+    }
 
-       kmem_freepages(cachep, slabp->s_mem-slabp->colouroff);
-       if (OFF_SLAB(cachep))
-               kmem_cache_free(cachep->slabp_cache, slabp);
+    kmem_freepages(cachep, slabp->s_mem-slabp->colouroff);
+    if (OFF_SLAB(cachep))
+        kmem_cache_free(cachep->slabp_cache, slabp);
 }
 
 /**
@@ -627,210 +585,211 @@ static void kmem_slab_destroy (kmem_cache_t *cachep, slab_t *slabp)
  */
 kmem_cache_t *
 kmem_cache_create (const char *name, size_t size, size_t offset,
-       unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long),
-       void (*dtor)(void*, kmem_cache_t *, unsigned long))
+                   unsigned long flags,
+                   void (*ctor)(void*, kmem_cache_t *, unsigned long),
+                   void (*dtor)(void*, kmem_cache_t *, unsigned long))
 {
-       const char *func_nm = KERN_ERR "kmem_create: ";
-       size_t left_over, align, slab_size;
-       kmem_cache_t *cachep = NULL;
-        unsigned long spin_flags;
-
-       /*
-        * Sanity checks... these are all serious usage bugs.
-        */
-       if ((!name) ||
-               ((strlen(name) >= CACHE_NAMELEN - 1)) ||
-               (size < BYTES_PER_WORD) ||
-               (size > (1<<MAX_OBJ_ORDER)*PAGE_SIZE) ||
-               (dtor && !ctor) ||
-               (offset < 0 || offset > size))
-                       BUG();
+    const char *func_nm = KERN_ERR "kmem_create: ";
+    size_t left_over, align, slab_size;
+    kmem_cache_t *cachep = NULL;
+    unsigned long spin_flags;
+
+    /*
+     * Sanity checks... these are all serious usage bugs.
+     */
+    if ((!name) ||
+        ((strlen(name) >= CACHE_NAMELEN - 1)) ||
+        (size < BYTES_PER_WORD) ||
+        (size > (1<<MAX_OBJ_ORDER)*PAGE_SIZE) ||
+        (dtor && !ctor) ||
+        (offset < 0 || offset > size))
+        BUG();
 
 #if DEBUG
-       if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
-               /* No constructor, but inital state check requested */
-               printk("%sNo con, but init state check requested - %s\n", func_nm, name);
-               flags &= ~SLAB_DEBUG_INITIAL;
-       }
-
-       if ((flags & SLAB_POISON) && ctor) {
-               /* request for poisoning, but we can't do that with a constructor */
-               printk("%sPoisoning requested, but con given - %s\n", func_nm, name);
-               flags &= ~SLAB_POISON;
-       }
+    if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
+        /* No constructor, but initial state check requested */
+        printk("%sNo con, but init state check requested - %s\n",
+               func_nm, name);
+        flags &= ~SLAB_DEBUG_INITIAL;
+    }
+
+    if ((flags & SLAB_POISON) && ctor) {
+        /* request for poisoning, but we can't do that with a constructor */
+        printk("%sPoisoning requested, but con given - %s\n",
+               func_nm, name);
+        flags &= ~SLAB_POISON;
+    }
 #if FORCED_DEBUG
-       if (size < (PAGE_SIZE>>3))
-               /*
-                * do not red zone large object, causes severe
-                * fragmentation.
-                */
-               flags |= SLAB_RED_ZONE;
-       if (!ctor)
-               flags |= SLAB_POISON;
+    if (size < (PAGE_SIZE>>3))
+        /*
+         * do not red zone large object, causes severe
+         * fragmentation.
+         */
+        flags |= SLAB_RED_ZONE;
+    if (!ctor)
+        flags |= SLAB_POISON;
 #endif
 #endif
 
-       /*
-        * Always checks flags, a caller might be expecting debug
-        * support which isn't available.
-        */
-       if (flags & ~CREATE_MASK)
-               BUG();
-
-       /* Get cache's description obj. */
-       cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
-       if (!cachep)
-               goto opps;
-       memset(cachep, 0, sizeof(kmem_cache_t));
-
-       /* Check that size is in terms of words.  This is needed to avoid
-        * unaligned accesses for some archs when redzoning is used, and makes
-        * sure any on-slab bufctl's are also correctly aligned.
-        */
-       if (size & (BYTES_PER_WORD-1)) {
-               size += (BYTES_PER_WORD-1);
-               size &= ~(BYTES_PER_WORD-1);
-               printk("%sForcing size word alignment - %s\n", func_nm, name);
-       }
+    /*
+     * Always checks flags, a caller might be expecting debug
+     * support which isn't available.
+     */
+    if (flags & ~CREATE_MASK)
+        BUG();
+
+    /* Get cache's description obj. */
+    cachep = (kmem_cache_t *)kmem_cache_alloc(&cache_cache);
+    if (!cachep)
+        goto opps;
+    memset(cachep, 0, sizeof(kmem_cache_t));
+
+    /* Check that size is in terms of words.  This is needed to avoid
+     * unaligned accesses for some archs when redzoning is used, and makes
+     * sure any on-slab bufctl's are also correctly aligned.
+     */
+    if (size & (BYTES_PER_WORD-1)) {
+        size += (BYTES_PER_WORD-1);
+        size &= ~(BYTES_PER_WORD-1);
+        printk("%sForcing size word alignment - %s\n", func_nm, name);
+    }
        
 #if DEBUG
-       if (flags & SLAB_RED_ZONE) {
-               /*
-                * There is no point trying to honour cache alignment
-                * when redzoning.
-                */
-               flags &= ~SLAB_HWCACHE_ALIGN;
-               size += 2*BYTES_PER_WORD;       /* words for redzone */
-       }
+    if (flags & SLAB_RED_ZONE) {
+        /*
+         * There is no point trying to honour cache alignment
+         * when redzoning.
+         */
+        flags &= ~SLAB_HWCACHE_ALIGN;
+        size += 2*BYTES_PER_WORD;      /* words for redzone */
+    }
 #endif
-       align = BYTES_PER_WORD;
-       if (flags & SLAB_HWCACHE_ALIGN)
-               align = L1_CACHE_BYTES;
-
-       /* Determine if the slab management is 'on' or 'off' slab. */
-       if (size >= (PAGE_SIZE>>3))
-               /*
-                * Size is large, assume best to place the slab management obj
-                * off-slab (should allow better packing of objs).
-                */
-               flags |= CFLGS_OFF_SLAB;
-
-       if (flags & SLAB_HWCACHE_ALIGN) {
-               /* Need to adjust size so that objs are cache aligned. */
-               /* Small obj size, can get at least two per cache line. */
-               /* FIXME: only power of 2 supported, was better */
-               while (size < align/2)
-                       align /= 2;
-               size = (size+align-1)&(~(align-1));
-       }
-
-       /* Cal size (in pages) of slabs, and the num of objs per slab.
-        * This could be made much more intelligent.  For now, try to avoid
-        * using high page-orders for slabs.  When the gfp() funcs are more
-        * friendly towards high-order requests, this should be changed.
-        */
-       do {
-               unsigned int break_flag = 0;
-cal_wastage:
-               kmem_cache_estimate(cachep->gfporder, size, flags,
-                                               &left_over, &cachep->num);
-               if (break_flag)
-                       break;
-               if (cachep->gfporder >= MAX_GFP_ORDER)
-                       break;
-               if (!cachep->num)
-                       goto next;
-               if (flags & CFLGS_OFF_SLAB && cachep->num > offslab_limit) {
-                       /* Oops, this num of objs will cause problems. */
-                       cachep->gfporder--;
-                       break_flag++;
-                       goto cal_wastage;
-               }
-
-               /*
-                * Large num of objs is good, but v. large slabs are currently
-                * bad for the gfp()s.
-                */
-               if (cachep->gfporder >= slab_break_gfp_order)
-                       break;
-
-               if ((left_over*8) <= (PAGE_SIZE<<cachep->gfporder))
-                       break;  /* Acceptable internal fragmentation. */
-next:
-               cachep->gfporder++;
-       } while (1);
-
-       if (!cachep->num) {
-               printk("kmem_cache_create: couldn't create cache %s.\n", name);
-               kmem_cache_free(&cache_cache, cachep);
-               cachep = NULL;
-               goto opps;
-       }
-       slab_size = L1_CACHE_ALIGN(cachep->num*sizeof(kmem_bufctl_t)+sizeof(slab_t));
-
-       /*
-        * If the slab has been placed off-slab, and we have enough space then
-        * move it on-slab. This is at the expense of any extra colouring.
-        */
-       if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
-               flags &= ~CFLGS_OFF_SLAB;
-               left_over -= slab_size;
-       }
-
-       /* Offset must be a multiple of the alignment. */
-       offset += (align-1);
-       offset &= ~(align-1);
-       if (!offset)
-               offset = L1_CACHE_BYTES;
-       cachep->colour_off = offset;
-       cachep->colour = left_over/offset;
-
-       /* init remaining fields */
-       if (!cachep->gfporder && !(flags & CFLGS_OFF_SLAB))
-               flags |= CFLGS_OPTIMIZE;
-
-       cachep->flags = flags;
-       cachep->gfpflags = 0;
-       if (flags & SLAB_CACHE_DMA)
-               cachep->gfpflags |= GFP_DMA;
-       spin_lock_init(&cachep->spinlock);
-       cachep->objsize = size;
-       INIT_LIST_HEAD(&cachep->slabs_full);
-       INIT_LIST_HEAD(&cachep->slabs_partial);
-       INIT_LIST_HEAD(&cachep->slabs_free);
-
-       if (flags & CFLGS_OFF_SLAB)
-               cachep->slabp_cache = kmem_find_general_cachep(slab_size,0);
-       cachep->ctor = ctor;
-       cachep->dtor = dtor;
-       /* Copy name over so we don't have problems with unloaded modules */
-       strcpy(cachep->name, name);
+    align = BYTES_PER_WORD;
+    if (flags & SLAB_HWCACHE_ALIGN)
+        align = L1_CACHE_BYTES;
+
+    /* Determine if the slab management is 'on' or 'off' slab. */
+    if (size >= (PAGE_SIZE>>3))
+        /*
+         * Size is large, assume best to place the slab management obj
+         * off-slab (should allow better packing of objs).
+         */
+        flags |= CFLGS_OFF_SLAB;
+
+    if (flags & SLAB_HWCACHE_ALIGN) {
+        /* Need to adjust size so that objs are cache aligned. */
+        /* Small obj size, can get at least two per cache line. */
+        /* FIXME: only power of 2 supported, was better */
+        while (size < align/2)
+            align /= 2;
+        size = (size+align-1)&(~(align-1));
+    }
+
+    /* Cal size (in pages) of slabs, and the num of objs per slab.
+     * This could be made much more intelligent.  For now, try to avoid
+     * using high page-orders for slabs.  When the gfp() funcs are more
+     * friendly towards high-order requests, this should be changed.
+     */
+    do {
+        unsigned int break_flag = 0;
+    cal_wastage:
+        kmem_cache_estimate(cachep->gfporder, size, flags,
+                            &left_over, &cachep->num);
+        if (break_flag)
+            break;
+        if (cachep->gfporder >= MAX_GFP_ORDER)
+            break;
+        if (!cachep->num)
+            goto next;
+        if (flags & CFLGS_OFF_SLAB && cachep->num > offslab_limit) {
+            /* Oops, this num of objs will cause problems. */
+            cachep->gfporder--;
+            break_flag++;
+            goto cal_wastage;
+        }
+
+        /*
+         * Large num of objs is good, but v. large slabs are currently
+         * bad for the gfp()s.
+         */
+        if (cachep->gfporder >= slab_break_gfp_order)
+            break;
+
+        if ((left_over*8) <= (PAGE_SIZE<<cachep->gfporder))
+            break;     /* Acceptable internal fragmentation. */
+    next:
+        cachep->gfporder++;
+    } while (1);
+
+    if (!cachep->num) {
+        printk("kmem_cache_create: couldn't create cache %s.\n", name);
+        kmem_cache_free(&cache_cache, cachep);
+        cachep = NULL;
+        goto opps;
+    }
+    slab_size = L1_CACHE_ALIGN(cachep->num*sizeof(kmem_bufctl_t) +
+                               sizeof(slab_t));
+
+    /*
+     * If the slab has been placed off-slab, and we have enough space then
+     * move it on-slab. This is at the expense of any extra colouring.
+     */
+    if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
+        flags &= ~CFLGS_OFF_SLAB;
+        left_over -= slab_size;
+    }
+
+    /* Offset must be a multiple of the alignment. */
+    offset += (align-1);
+    offset &= ~(align-1);
+    if (!offset)
+        offset = L1_CACHE_BYTES;
+    cachep->colour_off = offset;
+    cachep->colour = left_over/offset;
+
+    /* init remaining fields */
+    if (!cachep->gfporder && !(flags & CFLGS_OFF_SLAB))
+        flags |= CFLGS_OPTIMIZE;
+
+    cachep->flags = flags;
+    spin_lock_init(&cachep->spinlock);
+    cachep->objsize = size;
+    INIT_LIST_HEAD(&cachep->slabs_full);
+    INIT_LIST_HEAD(&cachep->slabs_partial);
+    INIT_LIST_HEAD(&cachep->slabs_free);
+
+    if (flags & CFLGS_OFF_SLAB)
+        cachep->slabp_cache = kmem_find_general_cachep(slab_size);
+    cachep->ctor = ctor;
+    cachep->dtor = dtor;
+    /* Copy name over so we don't have problems with unloaded modules */
+    strcpy(cachep->name, name);
 
 #ifdef CONFIG_SMP
-       if (g_cpucache_up)
-               enable_cpucache(cachep);
+    if (g_cpucache_up)
+        enable_cpucache(cachep);
 #endif
-       /* Need the semaphore to access the chain. */
-       down(&cache_chain_sem);
-       {
-               struct list_head *p;
-
-               list_for_each(p, &cache_chain) {
-                       kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
-
-                       /* The name field is constant - no lock needed. */
-                       if (!strcmp(pc->name, name))
-                               BUG();
-               }
-       }
-
-       /* There is no reason to lock our new cache before we
-        * link it in - no one knows about it yet...
-        */
-       list_add(&cachep->next, &cache_chain);
-       up(&cache_chain_sem);
-opps:
-       return cachep;
+    /* Need the semaphore to access the chain. */
+    down(&cache_chain_sem);
+    {
+        struct list_head *p;
+
+        list_for_each(p, &cache_chain) {
+            kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
+
+            /* The name field is constant - no lock needed. */
+            if (!strcmp(pc->name, name))
+                BUG();
+        }
+    }
+
+    /* There is no reason to lock our new cache before we
+     * link it in - no one knows about it yet...
+     */
+    list_add(&cachep->next, &cache_chain);
+    up(&cache_chain_sem);
+ opps:
+    return cachep;
 }
 
 
@@ -841,21 +800,21 @@ opps:
  */
 static int is_chained_kmem_cache(kmem_cache_t * cachep)
 {
-       struct list_head *p;
-       int ret = 0;
-        unsigned long spin_flags;
-
-       /* Find the cache in the chain of caches. */
-       down(&cache_chain_sem);
-       list_for_each(p, &cache_chain) {
-               if (p == &cachep->next) {
-                       ret = 1;
-                       break;
-               }
-       }
-       up(&cache_chain_sem);
-
-       return ret;
+    struct list_head *p;
+    int ret = 0;
+    unsigned long spin_flags;
+
+    /* Find the cache in the chain of caches. */
+    down(&cache_chain_sem);
+    list_for_each(p, &cache_chain) {
+        if (p == &cachep->next) {
+            ret = 1;
+            break;
+        }
+    }
+    up(&cache_chain_sem);
+
+    return ret;
 }
 #else
 #define is_chained_kmem_cache(x) 1
@@ -867,54 +826,54 @@ static int is_chained_kmem_cache(kmem_cache_t * cachep)
  */
 static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
 {
-       local_irq_disable();
-       func(arg);
-       local_irq_enable();
+    local_irq_disable();
+    func(arg);
+    local_irq_enable();
 
-       if (smp_call_function(func, arg, 1, 1))
-               BUG();
+    if (smp_call_function(func, arg, 1, 1))
+        BUG();
 }
 typedef struct ccupdate_struct_s
 {
-       kmem_cache_t *cachep;
-       cpucache_t *new[NR_CPUS];
+    kmem_cache_t *cachep;
+    cpucache_t *new[NR_CPUS];
 } ccupdate_struct_t;
 
 static void do_ccupdate_local(void *info)
 {
-       ccupdate_struct_t *new = (ccupdate_struct_t *)info;
-       cpucache_t *old = cc_data(new->cachep);
+    ccupdate_struct_t *new = (ccupdate_struct_t *)info;
+    cpucache_t *old = cc_data(new->cachep);
        
-       cc_data(new->cachep) = new->new[smp_processor_id()];
-       new->new[smp_processor_id()] = old;
+    cc_data(new->cachep) = new->new[smp_processor_id()];
+    new->new[smp_processor_id()] = old;
 }
 
 static void free_block (kmem_cache_t* cachep, void** objpp, int len);
 
 static void drain_cpu_caches(kmem_cache_t *cachep)
 {
-       ccupdate_struct_t new;
-       int i;
-        unsigned long spin_flags;
-
-       memset(&new.new,0,sizeof(new.new));
-
-       new.cachep = cachep;
-
-       down(&cache_chain_sem);
-       smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
-
-       for (i = 0; i < smp_num_cpus; i++) {
-               cpucache_t* ccold = new.new[cpu_logical_map(i)];
-               if (!ccold || (ccold->avail == 0))
-                       continue;
-               local_irq_disable();
-               free_block(cachep, cc_entry(ccold), ccold->avail);
-               local_irq_enable();
-               ccold->avail = 0;
-       }
-       smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
-       up(&cache_chain_sem);
+    ccupdate_struct_t new;
+    int i;
+    unsigned long spin_flags;
+
+    memset(&new.new,0,sizeof(new.new));
+
+    new.cachep = cachep;
+
+    down(&cache_chain_sem);
+    smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
+
+    for (i = 0; i < smp_num_cpus; i++) {
+        cpucache_t* ccold = new.new[cpu_logical_map(i)];
+        if (!ccold || (ccold->avail == 0))
+            continue;
+        local_irq_disable();
+        free_block(cachep, cc_entry(ccold), ccold->avail);
+        local_irq_enable();
+        ccold->avail = 0;
+    }
+    smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
+    up(&cache_chain_sem);
 }
 
 #else
@@ -923,35 +882,36 @@ static void drain_cpu_caches(kmem_cache_t *cachep)
 
 static int __kmem_cache_shrink(kmem_cache_t *cachep)
 {
-       slab_t *slabp;
-       int ret;
+    slab_t *slabp;
+    int ret;
 
-       drain_cpu_caches(cachep);
+    drain_cpu_caches(cachep);
 
-       spin_lock_irq(&cachep->spinlock);
+    spin_lock_irq(&cachep->spinlock);
 
-       /* If the cache is growing, stop shrinking. */
-       while (!cachep->growing) {
-               struct list_head *p;
+    /* If the cache is growing, stop shrinking. */
+    while (!cachep->growing) {
+        struct list_head *p;
 
-               p = cachep->slabs_free.prev;
-               if (p == &cachep->slabs_free)
-                       break;
+        p = cachep->slabs_free.prev;
+        if (p == &cachep->slabs_free)
+            break;
 
-               slabp = list_entry(cachep->slabs_free.prev, slab_t, list);
+        slabp = list_entry(cachep->slabs_free.prev, slab_t, list);
 #if DEBUG
-               if (slabp->inuse)
-                       BUG();
+        if (slabp->inuse)
+            BUG();
 #endif
-               list_del(&slabp->list);
-
-               spin_unlock_irq(&cachep->spinlock);
-               kmem_slab_destroy(cachep, slabp);
-               spin_lock_irq(&cachep->spinlock);
-       }
-       ret = !list_empty(&cachep->slabs_full) || !list_empty(&cachep->slabs_partial);
-       spin_unlock_irq(&cachep->spinlock);
-       return ret;
+        list_del(&slabp->list);
+
+        spin_unlock_irq(&cachep->spinlock);
+        kmem_slab_destroy(cachep, slabp);
+        spin_lock_irq(&cachep->spinlock);
+    }
+    ret = (!list_empty(&cachep->slabs_full) ||
+           !list_empty(&cachep->slabs_partial));
+    spin_unlock_irq(&cachep->spinlock);
+    return ret;
 }
 
 /**
@@ -963,10 +923,10 @@ static int __kmem_cache_shrink(kmem_cache_t *cachep)
  */
 int kmem_cache_shrink(kmem_cache_t *cachep)
 {
-       if (!cachep || !is_chained_kmem_cache(cachep))
-               BUG();
+    if (!cachep || !is_chained_kmem_cache(cachep))
+        BUG();
 
-       return __kmem_cache_shrink(cachep);
+    return __kmem_cache_shrink(cachep);
 }
 
 /**
@@ -986,201 +946,187 @@ int kmem_cache_shrink(kmem_cache_t *cachep)
  */
 int kmem_cache_destroy (kmem_cache_t * cachep)
 {
-        unsigned long spin_flags;
-
-       if (!cachep || cachep->growing)
-               BUG();
-
-       /* Find the cache in the chain of caches. */
-       down(&cache_chain_sem);
-       /* the chain is never empty, cache_cache is never destroyed */
-       if (clock_searchp == cachep)
-               clock_searchp = list_entry(cachep->next.next,
-                                               kmem_cache_t, next);
-       list_del(&cachep->next);
-       up(&cache_chain_sem);
-
-       if (__kmem_cache_shrink(cachep)) {
-               printk(KERN_ERR "kmem_cache_destroy: Can't free all objects %p\n",
-                      cachep);
-               down(&cache_chain_sem);
-               list_add(&cachep->next,&cache_chain);
-               up(&cache_chain_sem);
-               return 1;
-       }
+    unsigned long spin_flags;
+
+    if (!cachep || cachep->growing)
+        BUG();
+
+    /* Find the cache in the chain of caches. */
+    down(&cache_chain_sem);
+    /* the chain is never empty, cache_cache is never destroyed */
+    if (clock_searchp == cachep)
+        clock_searchp = list_entry(cachep->next.next,
+                                   kmem_cache_t, next);
+    list_del(&cachep->next);
+    up(&cache_chain_sem);
+
+    if (__kmem_cache_shrink(cachep)) {
+        printk(KERN_ERR "kmem_cache_destroy: Can't free all objects %p\n",
+               cachep);
+        down(&cache_chain_sem);
+        list_add(&cachep->next,&cache_chain);
+        up(&cache_chain_sem);
+        return 1;
+    }
 #ifdef CONFIG_SMP
-       {
-               int i;
-               for (i = 0; i < NR_CPUS; i++)
-                       kfree(cachep->cpudata[i]);
-       }
+    {
+        int i;
+        for (i = 0; i < NR_CPUS; i++)
+            kfree(cachep->cpudata[i]);
+    }
 #endif
-       kmem_cache_free(&cache_cache, cachep);
+    kmem_cache_free(&cache_cache, cachep);
 
-       return 0;
+    return 0;
 }
 
 /* Get the memory for a slab management obj. */
-static inline slab_t * kmem_cache_slabmgmt (kmem_cache_t *cachep,
-                       void *objp, int colour_off, int local_flags)
+static inline slab_t *kmem_cache_slabmgmt(kmem_cache_t *cachep,
+                                          void *objp, int colour_off, 
+                                          int local_flags)
 {
-       slab_t *slabp;
+    slab_t *slabp;
        
-       if (OFF_SLAB(cachep)) {
-               /* Slab management obj is off-slab. */
-               slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags);
-               if (!slabp)
-                       return NULL;
-       } else {
-               /* FIXME: change to
-                       slabp = objp
-                * if you enable OPTIMIZE
-                */
-               slabp = objp+colour_off;
-               colour_off += L1_CACHE_ALIGN(cachep->num *
-                               sizeof(kmem_bufctl_t) + sizeof(slab_t));
-       }
-       slabp->inuse = 0;
-       slabp->colouroff = colour_off;
-       slabp->s_mem = objp+colour_off;
-
-       return slabp;
+    if (OFF_SLAB(cachep)) {
+        /* Slab management obj is off-slab. */
+        slabp = kmem_cache_alloc(cachep->slabp_cache);
+        if (!slabp)
+            return NULL;
+    } else {
+        /* FIXME: change to
+           slabp = objp
+           * if you enable OPTIMIZE
+           */
+        slabp = objp+colour_off;
+        colour_off += L1_CACHE_ALIGN(cachep->num *
+                                     sizeof(kmem_bufctl_t) + sizeof(slab_t));
+    }
+    slabp->inuse = 0;
+    slabp->colouroff = colour_off;
+    slabp->s_mem = objp+colour_off;
+
+    return slabp;
 }
 
-static inline void kmem_cache_init_objs (kmem_cache_t * cachep,
-                       slab_t * slabp, unsigned long ctor_flags)
+static inline void kmem_cache_init_objs(kmem_cache_t *cachep,
+                                         slab_t *slabp,
+                                        unsigned long ctor_flags)
 {
-       int i;
+    int i;
 
-       for (i = 0; i < cachep->num; i++) {
-               void* objp = slabp->s_mem+cachep->objsize*i;
+    for (i = 0; i < cachep->num; i++) {
+        void* objp = slabp->s_mem+cachep->objsize*i;
 #if DEBUG
-               if (cachep->flags & SLAB_RED_ZONE) {
-                       *((unsigned long*)(objp)) = RED_MAGIC1;
-                       *((unsigned long*)(objp + cachep->objsize -
-                                       BYTES_PER_WORD)) = RED_MAGIC1;
-                       objp += BYTES_PER_WORD;
-               }
+        if (cachep->flags & SLAB_RED_ZONE) {
+            *((unsigned long*)(objp)) = RED_MAGIC1;
+            *((unsigned long*)(objp + cachep->objsize -
+                               BYTES_PER_WORD)) = RED_MAGIC1;
+            objp += BYTES_PER_WORD;
+        }
 #endif
 
-               /*
-                * Constructors are not allowed to allocate memory from
-                * the same cache which they are a constructor for.
-                * Otherwise, deadlock. They must also be threaded.
-                */
-               if (cachep->ctor)
-                       cachep->ctor(objp, cachep, ctor_flags);
+        /*
+         * Constructors are not allowed to allocate memory from
+         * the same cache which they are a constructor for.
+         * Otherwise, deadlock. They must also be threaded.
+         */
+        if (cachep->ctor)
+            cachep->ctor(objp, cachep, ctor_flags);
 #if DEBUG
-               if (cachep->flags & SLAB_RED_ZONE)
-                       objp -= BYTES_PER_WORD;
-               if (cachep->flags & SLAB_POISON)
-                       /* need to poison the objs */
-                       kmem_poison_obj(cachep, objp);
-               if (cachep->flags & SLAB_RED_ZONE) {
-                       if (*((unsigned long*)(objp)) != RED_MAGIC1)
-                               BUG();
-                       if (*((unsigned long*)(objp + cachep->objsize -
-                                       BYTES_PER_WORD)) != RED_MAGIC1)
-                               BUG();
-               }
+        if (cachep->flags & SLAB_RED_ZONE)
+            objp -= BYTES_PER_WORD;
+        if (cachep->flags & SLAB_POISON)
+            /* need to poison the objs */
+            kmem_poison_obj(cachep, objp);
+        if (cachep->flags & SLAB_RED_ZONE) {
+            if (*((unsigned long*)(objp)) != RED_MAGIC1)
+                BUG();
+            if (*((unsigned long*)(objp + cachep->objsize -
+                                   BYTES_PER_WORD)) != RED_MAGIC1)
+                BUG();
+        }
 #endif
-               slab_bufctl(slabp)[i] = i+1;
-       }
-       slab_bufctl(slabp)[i-1] = BUFCTL_END;
-       slabp->free = 0;
+        slab_bufctl(slabp)[i] = i+1;
+    }
+    slab_bufctl(slabp)[i-1] = BUFCTL_END;
+    slabp->free = 0;
 }
 
 /*
  * Grow (by 1) the number of slabs within a cache.  This is called by
  * kmem_cache_alloc() when there are no active objs left in a cache.
  */
-static int kmem_cache_grow (kmem_cache_t * cachep, int flags)
+static int kmem_cache_grow(kmem_cache_t * cachep)
 {
-       slab_t  *slabp;
-       struct pfn_info *page; unsigned int i;
-       void            *objp;
-       size_t           offset;
-       unsigned int     local_flags;
-       unsigned long    ctor_flags;
-       unsigned long    save_flags;
-
-       /* Be lazy and only check for valid flags here,
-        * keeping it out of the critical path in kmem_cache_alloc().
-        */
-       if (flags & ~(SLAB_DMA|SLAB_LEVEL_MASK|SLAB_NO_GROW))
-               BUG();
-       if (flags & SLAB_NO_GROW)
-               return 0;
-
-       ctor_flags = SLAB_CTOR_CONSTRUCTOR;
-       local_flags = (flags & SLAB_LEVEL_MASK);
-       if (local_flags == SLAB_ATOMIC)
-               /*
-                * Not allowed to sleep.  Need to tell a constructor about
-                * this - it might need to know...
-                */
-               ctor_flags |= SLAB_CTOR_ATOMIC;
-
-       /* About to mess with non-constant members - lock. */
-       spin_lock_irqsave(&cachep->spinlock, save_flags);
-
-       /* Get colour for the slab, and cal the next value. */
-       offset = cachep->colour_next;
-       cachep->colour_next++;
-       if (cachep->colour_next >= cachep->colour)
-               cachep->colour_next = 0;
-       offset *= cachep->colour_off;
-       cachep->dflags |= DFLGS_GROWN;
-
-       cachep->growing++;
-       spin_unlock_irqrestore(&cachep->spinlock, save_flags);
-
-       /* A series of memory allocations for a new slab.
-        * Neither the cache-chain semaphore, or cache-lock, are
-        * held, but the incrementing c_growing prevents this
-        * cache from being reaped or shrunk.
-        * Note: The cache could be selected in for reaping in
-        * kmem_cache_reap(), but when the final test is made the
-        * growing value will be seen.
-        */
-
-       /* Get mem for the objs. */
-       if (!(objp = kmem_getpages(cachep, flags)))
-               goto failed;
-
-       /* Get slab management. */
-       if (!(slabp = kmem_cache_slabmgmt(cachep, objp, offset, local_flags)))
-               goto opps1;
-
-       /* Nasty!!!!!! I hope this is OK. */
-       i = 1 << cachep->gfporder;
-       page = virt_to_page(objp);
-       do {
-               SET_PAGE_CACHE(page, cachep);
-               SET_PAGE_SLAB(page, slabp);
-               PageSetSlab(page);
-               page++;
-       } while (--i);
-
-       kmem_cache_init_objs(cachep, slabp, ctor_flags);
-
-       spin_lock_irqsave(&cachep->spinlock, save_flags);
-       cachep->growing--;
-
-       /* Make slab active. */
-       list_add_tail(&slabp->list, &cachep->slabs_free);
-       STATS_INC_GROWN(cachep);
-       cachep->failures = 0;
-
-       spin_unlock_irqrestore(&cachep->spinlock, save_flags);
-       return 1;
-opps1:
-       kmem_freepages(cachep, objp);
-failed:
-       spin_lock_irqsave(&cachep->spinlock, save_flags);
-       cachep->growing--;
-       spin_unlock_irqrestore(&cachep->spinlock, save_flags);
-       return 0;
+    slab_t     *slabp;
+    struct pfn_info    *page; unsigned int i;
+    void               *objp;
+    size_t              offset;
+    unsigned long       ctor_flags;
+    unsigned long       save_flags;
+
+    ctor_flags = SLAB_CTOR_CONSTRUCTOR;
+
+    /* About to mess with non-constant members - lock. */
+    spin_lock_irqsave(&cachep->spinlock, save_flags);
+
+    /* Get colour for the slab, and cal the next value. */
+    offset = cachep->colour_next;
+    cachep->colour_next++;
+    if (cachep->colour_next >= cachep->colour)
+        cachep->colour_next = 0;
+    offset *= cachep->colour_off;
+    cachep->dflags |= DFLGS_GROWN;
+
+    cachep->growing++;
+    spin_unlock_irqrestore(&cachep->spinlock, save_flags);
+
+    /* A series of memory allocations for a new slab.
+     * Neither the cache-chain semaphore, or cache-lock, are
+     * held, but the incrementing c_growing prevents this
+     * cache from being reaped or shrunk.
+     * Note: The cache could be selected in for reaping in
+     * kmem_cache_reap(), but when the final test is made the
+     * growing value will be seen.
+     */
+
+    /* Get mem for the objs. */
+    if (!(objp = kmem_getpages(cachep)))
+        goto failed;
+
+    /* Get slab management. */
+    if (!(slabp = kmem_cache_slabmgmt(cachep, objp, offset, 0)))
+        goto opps1;
+
+    /* Nasty!!!!!! I hope this is OK. */
+    i = 1 << cachep->gfporder;
+    page = virt_to_page(objp);
+    do {
+        SET_PAGE_CACHE(page, cachep);
+        SET_PAGE_SLAB(page, slabp);
+        PageSetSlab(page);
+        page++;
+    } while (--i);
+
+    kmem_cache_init_objs(cachep, slabp, ctor_flags);
+
+    spin_lock_irqsave(&cachep->spinlock, save_flags);
+    cachep->growing--;
+
+    /* Make slab active. */
+    list_add_tail(&slabp->list, &cachep->slabs_free);
+    STATS_INC_GROWN(cachep);
+    cachep->failures = 0;
+
+    spin_unlock_irqrestore(&cachep->spinlock, save_flags);
+    return 1;
+ opps1:
+    kmem_freepages(cachep, objp);
+ failed:
+    spin_lock_irqsave(&cachep->spinlock, save_flags);
+    cachep->growing--;
+    spin_unlock_irqrestore(&cachep->spinlock, save_flags);
+    return 0;
 }
 
 /*
@@ -1192,70 +1138,59 @@ failed:
 
 #if DEBUG
 static int kmem_extra_free_checks (kmem_cache_t * cachep,
-                       slab_t *slabp, void * objp)
+                                   slab_t *slabp, void * objp)
 {
-       int i;
-       unsigned int objnr = (objp-slabp->s_mem)/cachep->objsize;
-
-       if (objnr >= cachep->num)
-               BUG();
-       if (objp != slabp->s_mem + objnr*cachep->objsize)
-               BUG();
-
-       /* Check slab's freelist to see if this obj is there. */
-       for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
-               if (i == objnr)
-                       BUG();
-       }
-       return 0;
+    int i;
+    unsigned int objnr = (objp-slabp->s_mem)/cachep->objsize;
+
+    if (objnr >= cachep->num)
+        BUG();
+    if (objp != slabp->s_mem + objnr*cachep->objsize)
+        BUG();
+
+    /* Check slab's freelist to see if this obj is there. */
+    for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
+        if (i == objnr)
+            BUG();
+    }
+    return 0;
 }
 #endif
 
-static inline void kmem_cache_alloc_head(kmem_cache_t *cachep, int flags)
-{
-       if (flags & SLAB_DMA) {
-               if (!(cachep->gfpflags & GFP_DMA))
-                       BUG();
-       } else {
-               if (cachep->gfpflags & GFP_DMA)
-                       BUG();
-       }
-}
-
 static inline void * kmem_cache_alloc_one_tail (kmem_cache_t *cachep,
                                                slab_t *slabp)
 {
-       void *objp;
+    void *objp;
 
-       STATS_INC_ALLOCED(cachep);
-       STATS_INC_ACTIVE(cachep);
-       STATS_SET_HIGH(cachep);
+    STATS_INC_ALLOCED(cachep);
+    STATS_INC_ACTIVE(cachep);
+    STATS_SET_HIGH(cachep);
 
-       /* get obj pointer */
-       slabp->inuse++;
-       objp = slabp->s_mem + slabp->free*cachep->objsize;
-       slabp->free=slab_bufctl(slabp)[slabp->free];
+    /* get obj pointer */
+    slabp->inuse++;
+    objp = slabp->s_mem + slabp->free*cachep->objsize;
+    slabp->free=slab_bufctl(slabp)[slabp->free];
 
-       if (unlikely(slabp->free == BUFCTL_END)) {
-               list_del(&slabp->list);
-               list_add(&slabp->list, &cachep->slabs_full);
-       }
+    if (unlikely(slabp->free == BUFCTL_END)) {
+        list_del(&slabp->list);
+        list_add(&slabp->list, &cachep->slabs_full);
+    }
 #if DEBUG
-       if (cachep->flags & SLAB_POISON)
-               if (kmem_check_poison_obj(cachep, objp))
-                       BUG();
-       if (cachep->flags & SLAB_RED_ZONE) {
-               /* Set alloc red-zone, and check old one. */
-               if (xchg((unsigned long *)objp, RED_MAGIC2) !=
-                                                        RED_MAGIC1)
-                       BUG();
-               if (xchg((unsigned long *)(objp+cachep->objsize -
-                         BYTES_PER_WORD), RED_MAGIC2) != RED_MAGIC1)
-                       BUG();
-               objp += BYTES_PER_WORD;
-       }
+    if (cachep->flags & SLAB_POISON)
+        if (kmem_check_poison_obj(cachep, objp))
+            BUG();
+    if (cachep->flags & SLAB_RED_ZONE) {
+        /* Set alloc red-zone, and check old one. */
+        if (xchg((unsigned long *)objp, RED_MAGIC2) !=
+            RED_MAGIC1)
+            BUG();
+        if (xchg((unsigned long *)(objp+cachep->objsize -
+                                   BYTES_PER_WORD), RED_MAGIC2) != RED_MAGIC1)
+            BUG();
+        objp += BYTES_PER_WORD;
+    }
 #endif
-       return objp;
+    return objp;
 }
 
 /*
@@ -1285,85 +1220,84 @@ static inline void * kmem_cache_alloc_one_tail (kmem_cache_t *cachep,
 })
 
 #ifdef CONFIG_SMP
-void* kmem_cache_alloc_batch(kmem_cache_t* cachep, int flags)
+void* kmem_cache_alloc_batch(kmem_cache_t* cachep)
 {
-       int batchcount = cachep->batchcount;
-       cpucache_t* cc = cc_data(cachep);
-
-       spin_lock(&cachep->spinlock);
-       while (batchcount--) {
-               struct list_head * slabs_partial, * entry;
-               slab_t *slabp;
-               /* Get slab alloc is to come from. */
-               slabs_partial = &(cachep)->slabs_partial;
-               entry = slabs_partial->next;
-               if (unlikely(entry == slabs_partial)) {
-                       struct list_head * slabs_free;
-                       slabs_free = &(cachep)->slabs_free;
-                       entry = slabs_free->next;
-                       if (unlikely(entry == slabs_free))
-                               break;
-                       list_del(entry);
-                       list_add(entry, slabs_partial);
-               }
-
-               slabp = list_entry(entry, slab_t, list);
-               cc_entry(cc)[cc->avail++] =
-                               kmem_cache_alloc_one_tail(cachep, slabp);
-       }
-       spin_unlock(&cachep->spinlock);
-
-       if (cc->avail)
-               return cc_entry(cc)[--cc->avail];
-       return NULL;
+    int batchcount = cachep->batchcount;
+    cpucache_t* cc = cc_data(cachep);
+
+    spin_lock(&cachep->spinlock);
+    while (batchcount--) {
+        struct list_head * slabs_partial, * entry;
+        slab_t *slabp;
+        /* Get slab alloc is to come from. */
+        slabs_partial = &(cachep)->slabs_partial;
+        entry = slabs_partial->next;
+        if (unlikely(entry == slabs_partial)) {
+            struct list_head * slabs_free;
+            slabs_free = &(cachep)->slabs_free;
+            entry = slabs_free->next;
+            if (unlikely(entry == slabs_free))
+                break;
+            list_del(entry);
+            list_add(entry, slabs_partial);
+        }
+
+        slabp = list_entry(entry, slab_t, list);
+        cc_entry(cc)[cc->avail++] =
+            kmem_cache_alloc_one_tail(cachep, slabp);
+    }
+    spin_unlock(&cachep->spinlock);
+
+    if (cc->avail)
+        return cc_entry(cc)[--cc->avail];
+    return NULL;
 }
 #endif
 
-static inline void * __kmem_cache_alloc (kmem_cache_t *cachep, int flags)
+static inline void *__kmem_cache_alloc(kmem_cache_t *cachep)
 {
-       unsigned long save_flags;
-       void* objp;
+    unsigned long flags;
+    void* objp;
 
-       kmem_cache_alloc_head(cachep, flags);
-try_again:
-       local_irq_save(save_flags);
+ try_again:
+    local_irq_save(flags);
 #ifdef CONFIG_SMP
-       {
-               cpucache_t *cc = cc_data(cachep);
-
-               if (cc) {
-                       if (cc->avail) {
-                               STATS_INC_ALLOCHIT(cachep);
-                               objp = cc_entry(cc)[--cc->avail];
-                       } else {
-                               STATS_INC_ALLOCMISS(cachep);
-                               objp = kmem_cache_alloc_batch(cachep,flags);
-                               if (!objp)
-                                       goto alloc_new_slab_nolock;
-                       }
-               } else {
-                       spin_lock(&cachep->spinlock);
-                       objp = kmem_cache_alloc_one(cachep);
-                       spin_unlock(&cachep->spinlock);
-               }
-       }
+    {
+        cpucache_t *cc = cc_data(cachep);
+
+        if (cc) {
+            if (cc->avail) {
+                STATS_INC_ALLOCHIT(cachep);
+                objp = cc_entry(cc)[--cc->avail];
+            } else {
+                STATS_INC_ALLOCMISS(cachep);
+                objp = kmem_cache_alloc_batch(cachep);
+                if (!objp)
+                    goto alloc_new_slab_nolock;
+            }
+        } else {
+            spin_lock(&cachep->spinlock);
+            objp = kmem_cache_alloc_one(cachep);
+            spin_unlock(&cachep->spinlock);
+        }
+    }
 #else
-       objp = kmem_cache_alloc_one(cachep);
+    objp = kmem_cache_alloc_one(cachep);
 #endif
-       local_irq_restore(save_flags);
-       return objp;
-alloc_new_slab:
+    local_irq_restore(flags);
+    return objp;
+ alloc_new_slab:
 #ifdef CONFIG_SMP
-       spin_unlock(&cachep->spinlock);
-alloc_new_slab_nolock:
+    spin_unlock(&cachep->spinlock);
+ alloc_new_slab_nolock:
 #endif
-       local_irq_restore(save_flags);
-       if (kmem_cache_grow(cachep, flags))
-               /* Someone may have stolen our objs.  Doesn't matter, we'll
-                * just come back here again.
-                */
-               goto try_again;
-       return NULL;
+    local_irq_restore(flags);
+    if (kmem_cache_grow(cachep))
+        /* Someone may have stolen our objs.  Doesn't matter, we'll
+         * just come back here again.
+         */
+        goto try_again;
+    return NULL;
 }
 
 /*
@@ -1397,76 +1331,76 @@ alloc_new_slab_nolock:
 
 static inline void kmem_cache_free_one(kmem_cache_t *cachep, void *objp)
 {
-       slab_t* slabp;
+    slab_t* slabp;
 
-       CHECK_PAGE(virt_to_page(objp));
-       /* reduces memory footprint
-        *
-       if (OPTIMIZE(cachep))
-               slabp = (void*)((unsigned long)objp&(~(PAGE_SIZE-1)));
-        else
-        */
-       slabp = GET_PAGE_SLAB(virt_to_page(objp));
+    CHECK_PAGE(virt_to_page(objp));
+    /* reduces memory footprint
+     *
+     if (OPTIMIZE(cachep))
+     slabp = (void*)((unsigned long)objp&(~(PAGE_SIZE-1)));
+     else
+    */
+    slabp = GET_PAGE_SLAB(virt_to_page(objp));
 
 #if DEBUG
-       if (cachep->flags & SLAB_DEBUG_INITIAL)
-               /* Need to call the slab's constructor so the
-                * caller can perform a verify of its state (debugging).
-                * Called without the cache-lock held.
-                */
-               cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR|SLAB_CTOR_VERIFY);
-
-       if (cachep->flags & SLAB_RED_ZONE) {
-               objp -= BYTES_PER_WORD;
-               if (xchg((unsigned long *)objp, RED_MAGIC1) != RED_MAGIC2)
-                       /* Either write before start, or a double free. */
-                       BUG();
-               if (xchg((unsigned long *)(objp+cachep->objsize -
-                               BYTES_PER_WORD), RED_MAGIC1) != RED_MAGIC2)
-                       /* Either write past end, or a double free. */
-                       BUG();
-       }
-       if (cachep->flags & SLAB_POISON)
-               kmem_poison_obj(cachep, objp);
-       if (kmem_extra_free_checks(cachep, slabp, objp))
-               return;
+    if (cachep->flags & SLAB_DEBUG_INITIAL)
+        /* Need to call the slab's constructor so the
+         * caller can perform a verify of its state (debugging).
+         * Called without the cache-lock held.
+         */
+        cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR|SLAB_CTOR_VERIFY);
+
+    if (cachep->flags & SLAB_RED_ZONE) {
+        objp -= BYTES_PER_WORD;
+        if (xchg((unsigned long *)objp, RED_MAGIC1) != RED_MAGIC2)
+            /* Either write before start, or a double free. */
+            BUG();
+        if (xchg((unsigned long *)(objp+cachep->objsize -
+                                   BYTES_PER_WORD), RED_MAGIC1) != RED_MAGIC2)
+            /* Either write past end, or a double free. */
+            BUG();
+    }
+    if (cachep->flags & SLAB_POISON)
+        kmem_poison_obj(cachep, objp);
+    if (kmem_extra_free_checks(cachep, slabp, objp))
+        return;
 #endif
-       {
-               unsigned int objnr = (objp-slabp->s_mem)/cachep->objsize;
+    {
+        unsigned int objnr = (objp-slabp->s_mem)/cachep->objsize;
 
-               slab_bufctl(slabp)[objnr] = slabp->free;
-               slabp->free = objnr;
-       }
-       STATS_DEC_ACTIVE(cachep);
+        slab_bufctl(slabp)[objnr] = slabp->free;
+        slabp->free = objnr;
+    }
+    STATS_DEC_ACTIVE(cachep);
        
-       /* fixup slab chains */
-       {
-               int inuse = slabp->inuse;
-               if (unlikely(!--slabp->inuse)) {
-                       /* Was partial or full, now empty. */
-                       list_del(&slabp->list);
-                       list_add(&slabp->list, &cachep->slabs_free);
-               } else if (unlikely(inuse == cachep->num)) {
-                       /* Was full. */
-                       list_del(&slabp->list);
-                       list_add(&slabp->list, &cachep->slabs_partial);
-               }
-       }
+    /* fixup slab chains */
+    {
+        int inuse = slabp->inuse;
+        if (unlikely(!--slabp->inuse)) {
+            /* Was partial or full, now empty. */
+            list_del(&slabp->list);
+            list_add(&slabp->list, &cachep->slabs_free);
+        } else if (unlikely(inuse == cachep->num)) {
+            /* Was full. */
+            list_del(&slabp->list);
+            list_add(&slabp->list, &cachep->slabs_partial);
+        }
+    }
 }
 
 #ifdef CONFIG_SMP
 static inline void __free_block (kmem_cache_t* cachep,
-                                                       void** objpp, int len)
+                                 void** objpp, int len)
 {
-       for ( ; len > 0; len--, objpp++)
-               kmem_cache_free_one(cachep, *objpp);
+    for ( ; len > 0; len--, objpp++)
+        kmem_cache_free_one(cachep, *objpp);
 }
 
 static void free_block (kmem_cache_t* cachep, void** objpp, int len)
 {
-       spin_lock(&cachep->spinlock);
-       __free_block(cachep, objpp, len);
-       spin_unlock(&cachep->spinlock);
+    spin_lock(&cachep->spinlock);
+    __free_block(cachep, objpp, len);
+    spin_unlock(&cachep->spinlock);
 }
 #endif
 
@@ -1477,76 +1411,57 @@ static void free_block (kmem_cache_t* cachep, void** objpp, int len)
 static inline void __kmem_cache_free (kmem_cache_t *cachep, void* objp)
 {
 #ifdef CONFIG_SMP
-       cpucache_t *cc = cc_data(cachep);
-
-       CHECK_PAGE(virt_to_page(objp));
-       if (cc) {
-               int batchcount;
-               if (cc->avail < cc->limit) {
-                       STATS_INC_FREEHIT(cachep);
-                       cc_entry(cc)[cc->avail++] = objp;
-                       return;
-               }
-               STATS_INC_FREEMISS(cachep);
-               batchcount = cachep->batchcount;
-               cc->avail -= batchcount;
-               free_block(cachep,
-                                       &cc_entry(cc)[cc->avail],batchcount);
-               cc_entry(cc)[cc->avail++] = objp;
-               return;
-       } else {
-               free_block(cachep, &objp, 1);
-       }
+    cpucache_t *cc = cc_data(cachep);
+
+    CHECK_PAGE(virt_to_page(objp));
+    if (cc) {
+        int batchcount;
+        if (cc->avail < cc->limit) {
+            STATS_INC_FREEHIT(cachep);
+            cc_entry(cc)[cc->avail++] = objp;
+            return;
+        }
+        STATS_INC_FREEMISS(cachep);
+        batchcount = cachep->batchcount;
+        cc->avail -= batchcount;
+        free_block(cachep,
+                   &cc_entry(cc)[cc->avail],batchcount);
+        cc_entry(cc)[cc->avail++] = objp;
+        return;
+    } else {
+        free_block(cachep, &objp, 1);
+    }
 #else
-       kmem_cache_free_one(cachep, objp);
+    kmem_cache_free_one(cachep, objp);
 #endif
 }
 
 /**
  * kmem_cache_alloc - Allocate an object
  * @cachep: The cache to allocate from.
- * @flags: See kmalloc().
  *
  * Allocate an object from this cache.  The flags are only relevant
  * if the cache has no available objects.
  */
-void * kmem_cache_alloc (kmem_cache_t *cachep, int flags)
+void *kmem_cache_alloc(kmem_cache_t *cachep)
 {
-       return __kmem_cache_alloc(cachep, flags);
+    return __kmem_cache_alloc(cachep);
 }
 
 /**
  * kmalloc - allocate memory
  * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- *
- * kmalloc is the normal method of allocating memory
- * in the kernel.
- *
- * The @flags argument may be one of:
- *
- * %GFP_USER - Allocate memory on behalf of user.  May sleep.
- *
- * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
- *
- * %GFP_ATOMIC - Allocation will not sleep.  Use inside interrupt handlers.
- *
- * Additionally, the %GFP_DMA flag may be set to indicate the memory
- * must be suitable for DMA.  This can mean different things on different
- * platforms.  For example, on i386, it means that the memory must come
- * from the first 16MB.
  */
-void * kmalloc (size_t size, int flags)
+void *kmalloc(size_t size)
 {
-       cache_sizes_t *csizep = cache_sizes;
-
-       for (; csizep->cs_size; csizep++) {
-               if (size > csizep->cs_size)
-                       continue;
-               return __kmem_cache_alloc(flags & GFP_DMA ?
-                        csizep->cs_dmacachep : csizep->cs_cachep, flags);
-       }
-       return NULL;
+    cache_sizes_t *csizep = cache_sizes;
+
+    for (; csizep->cs_size; csizep++) {
+        if (size > csizep->cs_size)
+            continue;
+        return __kmem_cache_alloc(csizep->cs_cachep);
+    }
+    return NULL;
 }
 
 /**
@@ -1559,16 +1474,16 @@ void * kmalloc (size_t size, int flags)
  */
 void kmem_cache_free (kmem_cache_t *cachep, void *objp)
 {
-       unsigned long flags;
+    unsigned long flags;
 #if DEBUG
-       CHECK_PAGE(virt_to_page(objp));
-       if (cachep != GET_PAGE_CACHE(virt_to_page(objp)))
-               BUG();
+    CHECK_PAGE(virt_to_page(objp));
+    if (cachep != GET_PAGE_CACHE(virt_to_page(objp)))
+        BUG();
 #endif
 
-       local_irq_save(flags);
-       __kmem_cache_free(cachep, objp);
-       local_irq_restore(flags);
+    local_irq_save(flags);
+    __kmem_cache_free(cachep, objp);
+    local_irq_restore(flags);
 }
 
 /**
@@ -1580,32 +1495,32 @@ void kmem_cache_free (kmem_cache_t *cachep, void *objp)
  */
 void kfree (const void *objp)
 {
-       kmem_cache_t *c;
-       unsigned long flags;
-
-       if (!objp)
-               return;
-       local_irq_save(flags);
-       CHECK_PAGE(virt_to_page(objp));
-       c = GET_PAGE_CACHE(virt_to_page(objp));
-       __kmem_cache_free(c, (void*)objp);
-       local_irq_restore(flags);
+    kmem_cache_t *c;
+    unsigned long flags;
+
+    if (!objp)
+        return;
+    local_irq_save(flags);
+    CHECK_PAGE(virt_to_page(objp));
+    c = GET_PAGE_CACHE(virt_to_page(objp));
+    __kmem_cache_free(c, (void*)objp);
+    local_irq_restore(flags);
 }
 
-kmem_cache_t * kmem_find_general_cachep (size_t size, int gfpflags)
+kmem_cache_t *kmem_find_general_cachep(size_t size)
 {
-       cache_sizes_t *csizep = cache_sizes;
-
-       /* This function could be moved to the header file, and
-        * made inline so consumers can quickly determine what
-        * cache pointer they require.
-        */
-       for ( ; csizep->cs_size; csizep++) {
-               if (size > csizep->cs_size)
-                       continue;
-               break;
-       }
-       return (gfpflags & GFP_DMA) ? csizep->cs_dmacachep : csizep->cs_cachep;
+    cache_sizes_t *csizep = cache_sizes;
+
+    /* This function could be moved to the header file, and
+     * made inline so consumers can quickly determine what
+     * cache pointer they require.
+     */
+    for ( ; csizep->cs_size; csizep++) {
+        if (size > csizep->cs_size)
+            continue;
+        break;
+    }
+    return csizep->cs_cachep;
 }
 
 #ifdef CONFIG_SMP
@@ -1613,328 +1528,321 @@ kmem_cache_t * kmem_find_general_cachep (size_t size, int gfpflags)
 /* called with cache_chain_sem acquired.  */
 static int kmem_tune_cpucache (kmem_cache_t* cachep, int limit, int batchcount)
 {
-       ccupdate_struct_t new;
-       int i;
-
-       /*
-        * These are admin-provided, so we are more graceful.
-        */
-       if (limit < 0)
-               return -EINVAL;
-       if (batchcount < 0)
-               return -EINVAL;
-       if (batchcount > limit)
-               return -EINVAL;
-       if (limit != 0 && !batchcount)
-               return -EINVAL;
-
-       memset(&new.new,0,sizeof(new.new));
-       if (limit) {
-               for (i = 0; i< smp_num_cpus; i++) {
-                       cpucache_t* ccnew;
-
-                       ccnew = kmalloc(sizeof(void*)*limit+
-                                       sizeof(cpucache_t), GFP_KERNEL);
-                       if (!ccnew)
-                               goto oom;
-                       ccnew->limit = limit;
-                       ccnew->avail = 0;
-                       new.new[cpu_logical_map(i)] = ccnew;
-               }
-       }
-       new.cachep = cachep;
-       spin_lock_irq(&cachep->spinlock);
-       cachep->batchcount = batchcount;
-       spin_unlock_irq(&cachep->spinlock);
-
-       smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
-
-       for (i = 0; i < smp_num_cpus; i++) {
-               cpucache_t* ccold = new.new[cpu_logical_map(i)];
-               if (!ccold)
-                       continue;
-               local_irq_disable();
-               free_block(cachep, cc_entry(ccold), ccold->avail);
-               local_irq_enable();
-               kfree(ccold);
-       }
-       return 0;
-oom:
-       for (i--; i >= 0; i--)
-               kfree(new.new[cpu_logical_map(i)]);
-       return -ENOMEM;
+    ccupdate_struct_t new;
+    int i;
+
+    /*
+     * These are admin-provided, so we are more graceful.
+     */
+    if (limit < 0)
+        return -EINVAL;
+    if (batchcount < 0)
+        return -EINVAL;
+    if (batchcount > limit)
+        return -EINVAL;
+    if (limit != 0 && !batchcount)
+        return -EINVAL;
+
+    memset(&new.new,0,sizeof(new.new));
+    if (limit) {
+        for (i = 0; i< smp_num_cpus; i++) {
+            cpucache_t* ccnew;
+
+            ccnew = kmalloc(sizeof(void*)*limit+sizeof(cpucache_t));
+            if (!ccnew)
+                goto oom;
+            ccnew->limit = limit;
+            ccnew->avail = 0;
+            new.new[cpu_logical_map(i)] = ccnew;
+        }
+    }
+    new.cachep = cachep;
+    spin_lock_irq(&cachep->spinlock);
+    cachep->batchcount = batchcount;
+    spin_unlock_irq(&cachep->spinlock);
+
+    smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
+
+    for (i = 0; i < smp_num_cpus; i++) {
+        cpucache_t* ccold = new.new[cpu_logical_map(i)];
+        if (!ccold)
+            continue;
+        local_irq_disable();
+        free_block(cachep, cc_entry(ccold), ccold->avail);
+        local_irq_enable();
+        kfree(ccold);
+    }
+    return 0;
+ oom:
+    for (i--; i >= 0; i--)
+        kfree(new.new[cpu_logical_map(i)]);
+    return -ENOMEM;
 }
 
 static void enable_cpucache (kmem_cache_t *cachep)
 {
-       int err;
-       int limit;
-
-       /* FIXME: optimize */
-       if (cachep->objsize > PAGE_SIZE)
-               return;
-       if (cachep->objsize > 1024)
-               limit = 60;
-       else if (cachep->objsize > 256)
-               limit = 124;
-       else
-               limit = 252;
-
-       err = kmem_tune_cpucache(cachep, limit, limit/2);
-       if (err)
-               printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
-                                       cachep->name, -err);
+    int err;
+    int limit;
+
+    /* FIXME: optimize */
+    if (cachep->objsize > PAGE_SIZE)
+        return;
+    if (cachep->objsize > 1024)
+        limit = 60;
+    else if (cachep->objsize > 256)
+        limit = 124;
+    else
+        limit = 252;
+
+    err = kmem_tune_cpucache(cachep, limit, limit/2);
+    if (err)
+        printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
+               cachep->name, -err);
 }
 
 static void enable_all_cpucaches (void)
 {
-       struct list_head* p;
-        unsigned long spin_flags;
+    struct list_head* p;
+    unsigned long spin_flags;
 
-       down(&cache_chain_sem);
+    down(&cache_chain_sem);
 
-       p = &cache_cache.next;
-       do {
-               kmem_cache_t* cachep = list_entry(p, kmem_cache_t, next);
+    p = &cache_cache.next;
+    do {
+        kmem_cache_t* cachep = list_entry(p, kmem_cache_t, next);
 
-               enable_cpucache(cachep);
-               p = cachep->next.next;
-       } while (p != &cache_cache.next);
+        enable_cpucache(cachep);
+        p = cachep->next.next;
+    } while (p != &cache_cache.next);
 
-       up(&cache_chain_sem);
+    up(&cache_chain_sem);
 }
 #endif
 
 /**
  * kmem_cache_reap - Reclaim memory from caches.
- * @gfp_mask: the type of memory required.
- *
- * Called from do_try_to_free_pages() and __alloc_pages()
  */
-int kmem_cache_reap (int gfp_mask)
+int kmem_cache_reap(void)
 {
-       slab_t *slabp;
-       kmem_cache_t *searchp;
-       kmem_cache_t *best_cachep;
-       unsigned int best_pages;
-       unsigned int best_len;
-       unsigned int scan;
-       int ret = 0;
-        unsigned long spin_flags;
-
-        down(&cache_chain_sem);
-
-       scan = REAP_SCANLEN;
-       best_len = 0;
-       best_pages = 0;
-       best_cachep = NULL;
-       searchp = clock_searchp;
-       do {
-               unsigned int pages;
-               struct list_head* p;
-               unsigned int full_free;
-
-               /* It's safe to test this without holding the cache-lock. */
-               if (searchp->flags & SLAB_NO_REAP)
-                       goto next;
-               spin_lock_irq(&searchp->spinlock);
-               if (searchp->growing)
-                       goto next_unlock;
-               if (searchp->dflags & DFLGS_GROWN) {
-                       searchp->dflags &= ~DFLGS_GROWN;
-                       goto next_unlock;
-               }
+    slab_t *slabp;
+    kmem_cache_t *searchp;
+    kmem_cache_t *best_cachep;
+    unsigned int best_pages;
+    unsigned int best_len;
+    unsigned int scan;
+    int ret = 0;
+    unsigned long spin_flags;
+
+    down(&cache_chain_sem);
+
+    scan = REAP_SCANLEN;
+    best_len = 0;
+    best_pages = 0;
+    best_cachep = NULL;
+    searchp = clock_searchp;
+    do {
+        unsigned int pages;
+        struct list_head* p;
+        unsigned int full_free;
+
+        /* It's safe to test this without holding the cache-lock. */
+        if (searchp->flags & SLAB_NO_REAP)
+            goto next;
+        spin_lock_irq(&searchp->spinlock);
+        if (searchp->growing)
+            goto next_unlock;
+        if (searchp->dflags & DFLGS_GROWN) {
+            searchp->dflags &= ~DFLGS_GROWN;
+            goto next_unlock;
+        }
 #ifdef CONFIG_SMP
-               {
-                       cpucache_t *cc = cc_data(searchp);
-                       if (cc && cc->avail) {
-                               __free_block(searchp, cc_entry(cc), cc->avail);
-                               cc->avail = 0;
-                       }
-               }
+        {
+            cpucache_t *cc = cc_data(searchp);
+            if (cc && cc->avail) {
+                __free_block(searchp, cc_entry(cc), cc->avail);
+                cc->avail = 0;
+            }
+        }
 #endif
 
-               full_free = 0;
-               p = searchp->slabs_free.next;
-               while (p != &searchp->slabs_free) {
-                       slabp = list_entry(p, slab_t, list);
+        full_free = 0;
+        p = searchp->slabs_free.next;
+        while (p != &searchp->slabs_free) {
+            slabp = list_entry(p, slab_t, list);
 #if DEBUG
-                       if (slabp->inuse)
-                               BUG();
+            if (slabp->inuse)
+                BUG();
 #endif
-                       full_free++;
-                       p = p->next;
-               }
-
-               /*
-                * Try to avoid slabs with constructors and/or
-                * more than one page per slab (as it can be difficult
-                * to get high orders from gfp()).
-                */
-               pages = full_free * (1<<searchp->gfporder);
-               if (searchp->ctor)
-                       pages = (pages*4+1)/5;
-               if (searchp->gfporder)
-                       pages = (pages*4+1)/5;
-               if (pages > best_pages) {
-                       best_cachep = searchp;
-                       best_len = full_free;
-                       best_pages = pages;
-                       if (pages >= REAP_PERFECT) {
-                               clock_searchp = list_entry(searchp->next.next,
-                                                       kmem_cache_t,next);
-                               goto perfect;
-                       }
-               }
-next_unlock:
-               spin_unlock_irq(&searchp->spinlock);
-next:
-               searchp = list_entry(searchp->next.next,kmem_cache_t,next);
-       } while (--scan && searchp != clock_searchp);
-
-       clock_searchp = searchp;
-
-       if (!best_cachep)
-               /* couldn't find anything to reap */
-               goto out;
-
-       spin_lock_irq(&best_cachep->spinlock);
-perfect:
-       /* free only 50% of the free slabs */
-       best_len = (best_len + 1)/2;
-       for (scan = 0; scan < best_len; scan++) {
-               struct list_head *p;
-
-               if (best_cachep->growing)
-                       break;
-               p = best_cachep->slabs_free.prev;
-               if (p == &best_cachep->slabs_free)
-                       break;
-               slabp = list_entry(p,slab_t,list);
+            full_free++;
+            p = p->next;
+        }
+
+        /*
+         * Try to avoid slabs with constructors and/or
+         * more than one page per slab (as it can be difficult
+         * to get high orders from gfp()).
+         */
+        pages = full_free * (1<<searchp->gfporder);
+        if (searchp->ctor)
+            pages = (pages*4+1)/5;
+        if (searchp->gfporder)
+            pages = (pages*4+1)/5;
+        if (pages > best_pages) {
+            best_cachep = searchp;
+            best_len = full_free;
+            best_pages = pages;
+            if (pages >= REAP_PERFECT) {
+                clock_searchp = list_entry(searchp->next.next,
+                                           kmem_cache_t,next);
+                goto perfect;
+            }
+        }
+    next_unlock:
+        spin_unlock_irq(&searchp->spinlock);
+    next:
+        searchp = list_entry(searchp->next.next,kmem_cache_t,next);
+    } while (--scan && searchp != clock_searchp);
+
+    clock_searchp = searchp;
+
+    if (!best_cachep)
+        /* couldn't find anything to reap */
+        goto out;
+
+    spin_lock_irq(&best_cachep->spinlock);
+ perfect:
+    /* free only 50% of the free slabs */
+    best_len = (best_len + 1)/2;
+    for (scan = 0; scan < best_len; scan++) {
+        struct list_head *p;
+
+        if (best_cachep->growing)
+            break;
+        p = best_cachep->slabs_free.prev;
+        if (p == &best_cachep->slabs_free)
+            break;
+        slabp = list_entry(p,slab_t,list);
 #if DEBUG
-               if (slabp->inuse)
-                       BUG();
+        if (slabp->inuse)
+            BUG();
 #endif
-               list_del(&slabp->list);
-               STATS_INC_REAPED(best_cachep);
-
-               /* Safe to drop the lock. The slab is no longer linked to the
-                * cache.
-                */
-               spin_unlock_irq(&best_cachep->spinlock);
-               kmem_slab_destroy(best_cachep, slabp);
-               spin_lock_irq(&best_cachep->spinlock);
-       }
-       spin_unlock_irq(&best_cachep->spinlock);
-       ret = scan * (1 << best_cachep->gfporder);
-out:
-       up(&cache_chain_sem);
-       return ret;
+        list_del(&slabp->list);
+        STATS_INC_REAPED(best_cachep);
+
+        /* Safe to drop the lock. The slab is no longer linked to the
+         * cache.
+         */
+        spin_unlock_irq(&best_cachep->spinlock);
+        kmem_slab_destroy(best_cachep, slabp);
+        spin_lock_irq(&best_cachep->spinlock);
+    }
+    spin_unlock_irq(&best_cachep->spinlock);
+    ret = scan * (1 << best_cachep->gfporder);
+ out:
+    up(&cache_chain_sem);
+    return ret;
 }
 
 void dump_slabinfo()
 {
-       struct list_head *p;
-        unsigned long spin_flags;
+    struct list_head *p;
+    unsigned long spin_flags;
 
-       /* Output format version, so at least we can change it without _too_
-        * many complaints.
-        */
-       printk( "slabinfo - version: 1.1"
+    /* Output format version, so at least we can change it without _too_
+     * many complaints.
+     */
+    printk( "slabinfo - version: 1.1"
 #if STATS
-                               " (statistics)"
+            " (statistics)"
 #endif
 #ifdef CONFIG_SMP
-                               " (SMP)"
+            " (SMP)"
 #endif
-                               "\n");
-       down(&cache_chain_sem);
-       p = &cache_cache.next;
-       do {
-               kmem_cache_t    *cachep;
-               struct list_head *q;
-               slab_t          *slabp;
-               unsigned long   active_objs;
-               unsigned long   num_objs;
-               unsigned long   active_slabs = 0;
-               unsigned long   num_slabs;
-               cachep = list_entry(p, kmem_cache_t, next);
-
-               spin_lock_irq(&cachep->spinlock);
-               active_objs = 0;
-               num_slabs = 0;
-               list_for_each(q,&cachep->slabs_full) {
-                       slabp = list_entry(q, slab_t, list);
-                       if (slabp->inuse != cachep->num)
-                               BUG();
-                       active_objs += cachep->num;
-                       active_slabs++;
-               }
-               list_for_each(q,&cachep->slabs_partial) {
-                       slabp = list_entry(q, slab_t, list);
-                       if (slabp->inuse == cachep->num || !slabp->inuse)
-                               BUG();
-                       active_objs += slabp->inuse;
-                       active_slabs++;
-               }
-               list_for_each(q,&cachep->slabs_free) {
-                       slabp = list_entry(q, slab_t, list);
-                       if (slabp->inuse)
-                               BUG();
-                       num_slabs++;
-               }
-               num_slabs+=active_slabs;
-               num_objs = num_slabs*cachep->num;
-
-               printk("%-17s %6lu %6lu %6u %4lu %4lu %4u",
-                       cachep->name, active_objs, num_objs, cachep->objsize,
-                       active_slabs, num_slabs, (1<<cachep->gfporder));
+            "\n");
+    down(&cache_chain_sem);
+    p = &cache_cache.next;
+    do {
+        kmem_cache_t   *cachep;
+        struct list_head *q;
+        slab_t         *slabp;
+        unsigned long  active_objs;
+        unsigned long  num_objs;
+        unsigned long  active_slabs = 0;
+        unsigned long  num_slabs;
+        cachep = list_entry(p, kmem_cache_t, next);
+
+        spin_lock_irq(&cachep->spinlock);
+        active_objs = 0;
+        num_slabs = 0;
+        list_for_each(q,&cachep->slabs_full) {
+            slabp = list_entry(q, slab_t, list);
+            if (slabp->inuse != cachep->num)
+                BUG();
+            active_objs += cachep->num;
+            active_slabs++;
+        }
+        list_for_each(q,&cachep->slabs_partial) {
+            slabp = list_entry(q, slab_t, list);
+            if (slabp->inuse == cachep->num || !slabp->inuse)
+                BUG();
+            active_objs += slabp->inuse;
+            active_slabs++;
+        }
+        list_for_each(q,&cachep->slabs_free) {
+            slabp = list_entry(q, slab_t, list);
+            if (slabp->inuse)
+                BUG();
+            num_slabs++;
+        }
+        num_slabs+=active_slabs;
+        num_objs = num_slabs*cachep->num;
+
+        printk("%-17s %6lu %6lu %6u %4lu %4lu %4u",
+               cachep->name, active_objs, num_objs, cachep->objsize,
+               active_slabs, num_slabs, (1<<cachep->gfporder));
 
 #if STATS
-               {
-                       unsigned long errors = cachep->errors;
-                       unsigned long high = cachep->high_mark;
-                       unsigned long grown = cachep->grown;
-                       unsigned long reaped = cachep->reaped;
-                       unsigned long allocs = cachep->num_allocations;
-
-                       printk(" : %6lu %7lu %5lu %4lu %4lu",
-                                       high, allocs, grown, reaped, errors);
-               }
+        {
+            unsigned long errors = cachep->errors;
+            unsigned long high = cachep->high_mark;
+            unsigned long grown = cachep->grown;
+            unsigned long reaped = cachep->reaped;
+            unsigned long allocs = cachep->num_allocations;
+
+            printk(" : %6lu %7lu %5lu %4lu %4lu",
+                   high, allocs, grown, reaped, errors);
+        }
 #endif
 #ifdef CONFIG_SMP
-               {
-                       unsigned int batchcount = cachep->batchcount;
-                       unsigned int limit;
-
-                       if (cc_data(cachep))
-                               limit = cc_data(cachep)->limit;
-                        else
-                               limit = 0;
-                       printk(" : %4u %4u",
-                                       limit, batchcount);
-               }
+        {
+            unsigned int batchcount = cachep->batchcount;
+            unsigned int limit;
+
+            if (cc_data(cachep))
+                limit = cc_data(cachep)->limit;
+            else
+                limit = 0;
+            printk(" : %4u %4u",
+                   limit, batchcount);
+        }
 #endif
 #if STATS && defined(CONFIG_SMP)
-               {
-                       unsigned long allochit = atomic_read(&cachep->allochit);
-                       unsigned long allocmiss = atomic_read(&cachep->allocmiss);
-                       unsigned long freehit = atomic_read(&cachep->freehit);
-                       unsigned long freemiss = atomic_read(&cachep->freemiss);
-                       printk(" : %6lu %6lu %6lu %6lu",
-                                       allochit, allocmiss, freehit, freemiss);
-               }
+        {
+            unsigned long allochit = atomic_read(&cachep->allochit);
+            unsigned long allocmiss = atomic_read(&cachep->allocmiss);
+            unsigned long freehit = atomic_read(&cachep->freehit);
+            unsigned long freemiss = atomic_read(&cachep->freemiss);
+            printk(" : %6lu %6lu %6lu %6lu",
+                   allochit, allocmiss, freehit, freemiss);
+        }
 #endif
-               printk("\n");
-               spin_unlock_irq(&cachep->spinlock);
+        printk("\n");
+        spin_unlock_irq(&cachep->spinlock);
 
-               p = cachep->next.next;
-       } while (p != &cache_cache.next);
+        p = cachep->next.next;
+    } while (p != &cache_cache.next);
 
-       up(&cache_chain_sem);
+    up(&cache_chain_sem);
 
-       return;
+    return;
 }
-
-
-
index ec738c1b552012aa260adba4cb9ab3ef591b1c58..6d37ceeec1cbb4b7ffbf472e9f80877fd253e63d 100644 (file)
@@ -59,7 +59,7 @@ void init_trace_bufs(void)
     nr_pages = smp_num_cpus * opt_tbuf_size;
     order    = get_order(nr_pages * PAGE_SIZE);
     
-    if ( (rawbuf = (char *)__get_free_pages(GFP_KERNEL, order)) == NULL )
+    if ( (rawbuf = (char *)__get_free_pages(order)) == NULL )
     {
         printk("Xen trace buffers: memory allocation failed\n");
         return;
index 33b9a37a52c244cfdf90a8e90f12a7b6d650483d..26362bdee0cd0571d0e2897ef99e753d95825ae3 100644 (file)
@@ -300,7 +300,7 @@ long do_console_io(int cmd, int count, char *buffer)
     case CONSOLEIO_write:
         if ( count > (PAGE_SIZE-1) )
             count = PAGE_SIZE-1;
-        if ( (kbuf = (char *)get_free_page(GFP_KERNEL)) == NULL )
+        if ( (kbuf = (char *)get_free_page()) == NULL )
             return -ENOMEM;
         kbuf[count] = '\0';
         rc = count;
index c7dde554a12480666f8b939faefd9bd61130b88c..258d9d8c8ae0bc965c3cfbea2cdc04f8a756bcff 100644 (file)
@@ -1126,7 +1126,7 @@ static struct pci_bus * __devinit pci_alloc_bus(void)
 {
        struct pci_bus *b;
 
-       b = kmalloc(sizeof(*b), GFP_KERNEL);
+       b = kmalloc(sizeof(*b));
        if (b) {
                memset(b, 0, sizeof(*b));
                INIT_LIST_HEAD(&b->children);
@@ -1351,7 +1351,7 @@ struct pci_dev * __devinit pci_scan_device(struct pci_dev *temp)
        if (l == 0xffffffff || l == 0x00000000 || l == 0x0000ffff || l == 0xffff0000)
                return NULL;
 
-       dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+       dev = kmalloc(sizeof(*dev));
        if (!dev)
                return NULL;
 
@@ -1424,14 +1424,14 @@ unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus)
 {
        unsigned int devfn, max, pass;
        struct list_head *ln;
-       /* XEN MODIFICATION: Allocate 'dev0' on heap to avoid stack overflow. */
+       /* XEN MODIFICATION: Allocate dev0 on heap to avoid stack overflow. */
        struct pci_dev *dev, *dev0;
 
        DBG("Scanning bus %02x\n", bus->number);
        max = bus->secondary;
 
        /* Create a device template */
-       dev0 = kmalloc(sizeof(struct pci_dev), GFP_KERNEL);
+       dev0 = kmalloc(sizeof(struct pci_dev));
        if(!dev0) {
          panic("Out of memory scanning PCI bus!\n");
        }
index fdd85c9f13651598386612403678c13fa7533d89..3486f7a1ed9275626175a9355e459ece9e84a79e 100644 (file)
@@ -171,7 +171,7 @@ pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
                                        ln->res->start;
                        }
                        if (r_align > align) {
-                               tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
+                               tmp = kmalloc(sizeof(*tmp));
                                if (!tmp)
                                        panic("pdev_sort_resources(): "
                                              "kmalloc() failed!\n");
index 61996d4ccccf0bed1a32f392a12871680d383dfe..95cbb4b5145a53f6cafbe98701452ea16837cbc6 100644 (file)
@@ -33,9 +33,6 @@ typedef struct { unsigned long pt_lo; } pagetable_t;
 #define l2_pgentry_val(_x) ((_x).l2_lo)
 #define pagetable_val(_x)  ((_x).pt_lo)
 
-#define alloc_l1_pagetable()  ((l1_pgentry_t *)get_free_page(GFP_KERNEL))
-#define alloc_l2_pagetable()  ((l2_pgentry_t *)get_free_page(GFP_KERNEL))
-
 /* Add type to a table entry. */
 #define mk_l1_pgentry(_x)  ( (l1_pgentry_t) { (_x) } )
 #define mk_l2_pgentry(_x)  ( (l2_pgentry_t) { (_x) } )
index cb8651ec8a2a7bf334acf6a0d1430e0e4b3bfba7..919fd2d80a89c4b582712231385818144d561add 100644 (file)
@@ -67,11 +67,6 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 #define l4_pgentry_val(_x) ((_x).l4_lo)
 #define pagetable_val(_x)  ((_x).pt_lo)
 
-#define alloc_l1_pagetable()  ((l1_pgentry_t *)get_free_page(GFP_KERNEL))
-#define alloc_l2_pagetable()  ((l2_pgentry_t *)get_free_page(GFP_KERNEL))
-#define alloc_l3_pagetable()  ((l3_pgentry_t *)get_free_page(GFP_KERNEL))
-#define alloc_l4_pagetable()  ((l4_pgentry_t *)get_free_page(GFP_KERNEL))
-
 /* Add type to a table entry. */
 #define mk_l1_pgentry(_x)  ( (l1_pgentry_t) { (_x) } )
 #define mk_l2_pgentry(_x)  ( (l2_pgentry_t) { (_x) } )
index f23ad5f3f4690ad4a1a3c8515cfb1c65a57b1943..ae6e66817d3486ee5a0c0fc1870456e7015b2d84 100644 (file)
 
 #include <hypervisor-ifs/hypervisor-if.h>
 
-/*
- * These are for compatibility with calls to the Linux memory allocators.
- */
-
-#define __GFP_DMA       0x01
-#define GFP_DMA         __GFP_DMA
-#define __GFP_WAIT      0x10    /* Can wait and reschedule? */
-#define __GFP_HIGH      0x20    /* Should access emergency pools? */
-#define __GFP_IO        0x40    /* Can start low memory physical IO? */
-#define __GFP_HIGHIO    0x80    /* Can start high mem physical IO? */
-#define __GFP_FS        0x100   /* Can call down to low-level FS? */
-#define GFP_ATOMIC      (__GFP_HIGH)
-#define GFP_KERNEL      (__GFP_HIGH | __GFP_WAIT | __GFP_IO | \
-                         __GFP_HIGHIO | __GFP_FS)
-
 /*
  * The following is for page_alloc.c.
  */
 
 void init_page_allocator(unsigned long min, unsigned long max);
-unsigned long __get_free_pages(int mask, int order);
+unsigned long __get_free_pages(int order);
 void __free_pages(unsigned long p, int order);
-#define get_free_page(_m) (__get_free_pages((_m),0))
-#define __get_free_page(_m) (__get_free_pages((_m),0))
+#define get_free_page()   (__get_free_pages(0))
+#define __get_free_page() (__get_free_pages(0))
 #define free_pages(_p,_o) (__free_pages(_p,_o))
-#define free_page(_p) (__free_pages(_p,0))
+#define free_page(_p)     (__free_pages(_p,0))
 
 
 /*
index b97139e11f85730370b3cc80a7dd3f5739d5e7a8..3bb10195df5234ae77490a3db1e688f48bdaafba 100644 (file)
@@ -505,13 +505,13 @@ static inline void set_shadow_status( struct mm_struct *m,
         SH_LOG("allocate more shadow hashtable blocks");
 
         // we need to allocate more space
-        extra = kmalloc( sizeof(void*) + (shadow_ht_extra_size * 
-                                          sizeof(struct shadow_status)), GFP_KERNEL );
+        extra = kmalloc(sizeof(void*) + (shadow_ht_extra_size * 
+                                         sizeof(struct shadow_status)));
 
         if( ! extra ) BUG(); // should be more graceful here....
 
-        memset( extra, 0, sizeof(void*) + (shadow_ht_extra_size * 
-                                           sizeof(struct shadow_status)) );
+        memset(extra, 0, sizeof(void*) + (shadow_ht_extra_size * 
+                                          sizeof(struct shadow_status)));
 
         m->shadow_extras_count++;
 
index 5e1feb2a85fe434132fb589e846203a20a802809..7df322e12e8514eb75ba8981ac4bae48ee242955 100644 (file)
@@ -4,55 +4,44 @@
  */
 
 #ifndef __SLAB_H__
-#define        __SLAB_H__
+#define __SLAB_H__
 
 typedef struct kmem_cache_s kmem_cache_t;
 
 #include <xen/mm.h>
 #include <xen/cache.h>
 
-/* flags for kmem_cache_alloc() */
-#define        SLAB_ATOMIC             GFP_ATOMIC
-#define        SLAB_KERNEL             GFP_KERNEL
-#define        SLAB_DMA                GFP_DMA
+/* Flags to pass to kmem_cache_create(). */
+/* NB. The first 3 are only valid when built with SLAB_DEBUG_SUPPORT. */
+#define SLAB_DEBUG_INITIAL      0x00000200UL    /* Call constructor */
+#define SLAB_RED_ZONE           0x00000400UL    /* Red zone objs in a cache */
+#define SLAB_POISON             0x00000800UL    /* Poison objects */
+#define SLAB_NO_REAP            0x00001000UL    /* never reap from the cache */
+#define SLAB_HWCACHE_ALIGN      0x00002000UL    /* align obj on a cache line */
 
-#define SLAB_LEVEL_MASK                (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_HIGHIO|__GFP_FS)
-#define        SLAB_NO_GROW            0x00001000UL    /* don't grow a cache */
+/* Flags passed to a constructor function. */
+#define SLAB_CTOR_CONSTRUCTOR   0x001UL /* if not set, then deconstructor */
+#define SLAB_CTOR_ATOMIC        0x002UL /* tell cons. it can't sleep */
+#define SLAB_CTOR_VERIFY        0x004UL /* tell cons. it's a verify call */
 
-/* flags to pass to kmem_cache_create().
- * The first 3 are only valid when the allocator as been build
- * SLAB_DEBUG_SUPPORT.
- */
-#define        SLAB_DEBUG_INITIAL      0x00000200UL    /* Call constructor (as verifier) */
-#define        SLAB_RED_ZONE           0x00000400UL    /* Red zone objs in a cache */
-#define        SLAB_POISON             0x00000800UL    /* Poison objects */
-#define        SLAB_NO_REAP            0x00001000UL    /* never reap from the cache */
-#define        SLAB_HWCACHE_ALIGN      0x00002000UL    /* align objs on a h/w cache lines */
-#define SLAB_CACHE_DMA         0x00004000UL    /* use GFP_DMA memory */
-
-/* flags passed to a constructor func */
-#define        SLAB_CTOR_CONSTRUCTOR   0x001UL         /* if not set, then deconstructor */
-#define SLAB_CTOR_ATOMIC       0x002UL         /* tell constructor it can't sleep */
-#define        SLAB_CTOR_VERIFY        0x004UL         /* tell constructor it's a verify call */
-
-/* prototypes */
 extern void kmem_cache_init(void);
 extern void kmem_cache_sizes_init(unsigned long);
 
-extern kmem_cache_t *kmem_find_general_cachep(size_t, int gfpflags);
-extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long,
-                                      void (*)(void *, kmem_cache_t *, unsigned long),
-                                      void (*)(void *, kmem_cache_t *, unsigned long));
+extern kmem_cache_t *kmem_find_general_cachep(size_t);
+extern kmem_cache_t *kmem_cache_create(
+    const char *, size_t, size_t, unsigned long,
+    void (*)(void *, kmem_cache_t *, unsigned long),
+    void (*)(void *, kmem_cache_t *, unsigned long));
 extern int kmem_cache_destroy(kmem_cache_t *);
 extern int kmem_cache_shrink(kmem_cache_t *);
-extern void *kmem_cache_alloc(kmem_cache_t *, int);
+extern void *kmem_cache_alloc(kmem_cache_t *);
 extern void kmem_cache_free(kmem_cache_t *, void *);
 
-extern void *kmalloc(size_t, int);
+extern void *kmalloc(size_t);
 extern void kfree(const void *);
 
-extern int FASTCALL(kmem_cache_reap(int));
+extern int kmem_cache_reap(void);
 
 extern void dump_slabinfo();
 
-#endif /* __SLAB_H__ */
+#endif /* __SLAB_H__ */